####################################################################################
#
# Bayesian Gaussian AR(p) modeling of the Canadian Lynx time series
#
# Goal: compare MCMC schemes for the parameters beta0, beta1, ..., betap, sigma2:
#
#   A) Gibbs sampler, block moves: (beta0,...,betap) & sigma2
#      2 blocks: 1st block with p+1 components, 2nd block with 1 component
#   B) Gibbs sampler, component-wise moves: beta0, beta1, ..., betap, sigma2
#      (p+2) single components
#   C) RW-MH + Gibbs, component-wise moves: beta0, beta1, ..., betap, sigma2
#      (p+2) single components, with RW-MH steps for beta0,...,betap
#
# Lopes and Salazar (2004)
# Bayesian model uncertainty in smooth transition autoregressions
# Journal of Time Series Analysis, 27, 99-117.
# http://hedibert.org/wp-content/uploads/2013/12/lopes-salazar-2006a.pdf
#
# Hedibert F. Lopes
# www.hedibert.org
# February 3rd 2021
#
####################################################################################
library("astsa")

# Standardize the Lynx series to zero mean and unit variance
y <- Lynx
y <- y - mean(y)
y <- y / sqrt(var(y))

par(mfrow = c(1, 1))
ts.plot(y, main = "Canadian Lynx", ylab = "")

# Build the AR(p) regression representation with p = 3:
#   response  y[t]                       for t = p+1, ..., nd
#   design    X = [1, y[t-1], ..., y[t-p]]
data <- y
p    <- 3
nd   <- length(data)
y    <- data[(p + 1):nd]
X    <- cbind(1, data[p:(nd - 1)])
for (lag in 2:p) {
  X <- cbind(X, data[(p - lag + 1):(nd - lag)])
}
n <- nrow(X)

# Prior hyperparameters
# NOTE(review): the posterior updates below scale B1 by sigma2 when sampling beta,
# i.e. a conjugate normal-inverse-gamma form beta | sigma2 ~ N(b0, sigma2*B0) is
# implied — confirm against the paper.
b0    <- rep(0, p + 1)
B0    <- diag(25, p + 1)
nu0   <- 18
sig20 <- 0.01666667

# Posterior hyperparameters (conjugate updates)
nu0sig20 <- nu0 * sig20
iB0      <- solve(B0)
iB0b0    <- iB0 %*% b0
B1       <- solve(iB0 + t(X) %*% X)
tcB1     <- t(chol(B1))    # lower-triangular factor: B1 = tcB1 %*% t(tcB1)
b1       <- B1 %*% (iB0b0 + t(X) %*% y)
nu1      <- nu0 + n
sig21    <- ((nu0sig20 + sum((y - X %*% b1) * y) + t(b0 - b1) %*% iB0b0) / nu1)[1]

# Posterior summary table: mean, scale, t-ratio and two-sided tail probability
se   <- sqrt(nu1 / (nu1 - 2) * sig21 * diag(B1))
trat <- b1 / se
tab  <- round(cbind(b1, se, trat, 2 * (1 - pt(abs(trat), df = nu1))), 5)
colnames(tab) <- c("Mean", "StDev", "t", "tail")
rownames(tab) <- c("Intercept", "beta1", "beta2", "beta3")
tab

# A) Gibbs sampler with block moves: (beta0,...,betap) and sigma2
set.seed(12345)
M      <- 2000                 # retained posterior draws
burnin <- 10000                # burn-in iterations (discarded)
skip   <- 10                   # thinning interval
niter  <- burnin + skip * M    # total MCMC iterations
draws  <- matrix(0, niter, p + 2)
# A) Block-move Gibbs sampler: alternate between
#    sigma2 | beta, y  (drawn as 1/gamma, i.e. inverse gamma with shape nu1/2)
#    beta   | sigma2, y  (multivariate normal, sampled as
#    b1 + sqrt(sigma2) * chol-factor * z with z ~ N(0, I)).
beta.d = rep(0,p+1)
runtime = system.time({
for (iter in 1:niter){
sig2.d = 1/rgamma(1,nu1/2,(nu0sig20+sum((y-X%*%beta.d)^2))/2)
beta.d = b1+sqrt(sig2.d)*tcB1%*%rnorm(p+1)
draws[iter,] = c(beta.d,sig2.d)
}})
# Discard the burn-in and thin every `skip` iterations, keeping M draws
draws = draws[seq(burnin+1,niter,by=skip),]

# Gibbs sampler: components (beta0,beta1,...,betap,sigma2)
# --------------------------------------------------------
# B) Component-wise Gibbs: sigma2 drawn as in A, then each beta[i] drawn from
#    its univariate full conditional given the other coefficients, using the
#    partial residuals yy = y - X[,-i] %*% beta[-i].
ind = 1:(p+1)
set.seed(54321)
niter = burnin+skip*M
draws1 = matrix(0,niter,p+2)
beta.d = rep(0,p+1)
runtime1 = system.time({
for (iter in 1:niter){
sig2.d = 1/rgamma(1,nu1/2,(nu0sig20+sum((y-X%*%beta.d)^2))/2)
for (i in 1:(p+1)){
XX = X[,ind!=i]
yy = y-XX%*%beta.d[ind!=i]
xx = X[,ind==i]
# NOTE(review): `var` and `mean` shadow the base R functions of the same
# name; harmless in this scope but worth renaming (e.g. v.i, m.i).
var = 1/(sum(xx^2)+1/B0[i,i])
mean = var*(sum(xx*yy)+b0[i]/B0[i,i])
beta.d[i] = rnorm(1,mean,sqrt(sig2.d*var))
}
draws1[iter,] = c(beta.d,sig2.d)
}})
# Discard the burn-in and thin every `skip` iterations, keeping M draws
draws1 = draws1[seq(burnin+1,niter,by=skip),]

# MCMC: cycling beta via RW-MH and Gibbs for sigma2
# -------------------------------------------------
# C) Same sigma2 Gibbs step as above, but each beta[i] is updated by a
#    random-walk Metropolis-Hastings step with N(beta[i], sd = 0.01) proposals
#    evaluated on the partial-residual likelihood.
ind = 1:(p+1)
set.seed(54321)
niter = burnin+skip*M
draws2 = matrix(0,niter,p+2)
beta.d = rep(0,p+1)
runtime2 = system.time({
for (iter in 1:niter){
sig2.d = 1/rgamma(1,nu1/2,(nu0sig20+sum((y-X%*%beta.d)^2))/2)
for (i in 1:(p+1)){
XX = X[,ind!=i]
yy = y-XX%*%beta.d[ind!=i]
xx = X[,ind==i]
beta1 = rnorm(1,beta.d[i],0.01)
# Log acceptance ratio below uses only the likelihood terms.
# NOTE(review): the normal prior on beta[i] does not appear in num/den —
# presumably treated as approximately flat (B0[i,i] = 25); confirm against
# the paper.
num = sum(dnorm(yy,xx*beta1,sqrt(sig2.d),log=TRUE))
den = sum(dnorm(yy,xx*beta.d[i],sqrt(sig2.d),log=TRUE))
accept = min(0,num-den)
if (log(runif(1))