######################################################################
# Econometrics III - Time series
#
# Bayesian inference via Gibbs sampler for the Gaussian AR(2)
# model with conditionally conjugate priors
######################################################################

# Simulated data ----
# True DGP: y(t) = alpha + beta*y(t-1) + gamma*y(t-2) + e(t),
# e(t) ~ N(0, sig2.t). The first two observations stay at 0.
set.seed(1234)
alpha.t <- 0.01
beta.t <- 0.5
gamma.t <- 0.48
sig2.t <- 0.01
n <- 100
y <- rep(0, n)
for (t in 3:n) {
  y[t] <- rnorm(1, alpha.t + beta.t * y[t - 1] + gamma.t * y[t - 2],
                sqrt(sig2.t))
}
ts.plot(y)

# Only n-2 observations enter the conditional AR(2) likelihood
# (y[1] and y[2] are conditioned on, not modelled).
nobs <- n - 2

# Let us fit a Gaussian AR(2) to the data
# y(t) | y(t-1), y(t-2) ~ N(alpha + beta*y(t-1) + gamma*y(t-2), sig2)

# OLS estimation ----
# Regress y[3:n] on an intercept, y(t-1) and y(t-2).
y1 <- y[3:n]
X <- cbind(1, y[2:(n - 1)], y[1:(n - 2)])
theta.ols <- solve(t(X) %*% X) %*% t(X) %*% y1
# FIX: divide the residual sum of squares by the residual degrees of
# freedom (nobs residuals minus 3 regressors), not by n -- the original
# divided by n although only nobs residuals exist.
sig2.ols <- sum((y1 - X %*% theta.ols)^2) / (nobs - 3)

# Prior hyperparameters ----
# alpha ~ N(a0, A0), beta ~ N(b0, B0), gamma ~ N(g0, G0), sig2 ~ IG(c0, d0)
a0 <- 0; A0 <- 10
b0 <- 0; B0 <- 10
g0 <- 0; G0 <- 10
c0 <- 0.01; d0 <- 0.01

# Posterior inference via Gibbs sampler ----
# MCMC set up: M0 burn-in draws, M retained draws, thinning factor `thin`.
M0 <- 1000
M <- 10000
thin <- 1
niter <- M0 + M * thin
draws <- matrix(0, niter, 4)

# Initial values
alpha <- 0
beta <- 0
gamma <- 0
sig2 <- 10

for (iter in 1:niter) {
  # Sampling from p(alpha | beta, gamma, sig2, y).
  # FIX: the likelihood contributes nobs (= n-2) precision terms, not n
  # (z has length n-2); the original overstated the posterior precision.
  # NOTE: locals renamed v/m -- the original's `var`/`mean` shadowed the
  # base functions of the same name.
  z <- y[3:n] - beta * y[2:(n - 1)] - gamma * y[1:(n - 2)]
  v <- 1 / (1 / A0 + nobs / sig2)      # conditional posterior variance
  m <- v * (a0 / A0 + sum(z) / sig2)   # conditional posterior mean
  alpha <- rnorm(1, m, sqrt(v))

  # Sampling from p(beta | alpha, gamma, sig2, y)
  z <- y[3:n] - alpha - gamma * y[1:(n - 2)]
  v <- 1 / (1 / B0 + sum(y[2:(n - 1)]^2) / sig2)
  m <- v * (b0 / B0 + sum(z * y[2:(n - 1)]) / sig2)
  beta <- rnorm(1, m, sqrt(v))

  # Sampling from p(gamma | alpha, beta, sig2, y)
  z <- y[3:n] - alpha - beta * y[2:(n - 1)]
  v <- 1 / (1 / G0 + sum(y[1:(n - 2)]^2) / sig2)
  m <- v * (g0 / G0 + sum(z * y[1:(n - 2)]) / sig2)
  gamma <- rnorm(1, m, sqrt(v))

  # Sampling from p(sig2 | alpha, beta, gamma, y).
  # FIX: the shape update adds nobs/2, not n/2 (only nobs residuals).
  z <- y[3:n] - alpha - beta * y[2:(n - 1)] - gamma * y[1:(n - 2)]
  c1 <- c0 + nobs / 2
  d1 <- d0 + sum(z^2) / 2
  sig2 <- 1 / rgamma(1, c1, d1)   # rgamma(shape = c1, rate = d1)

  # Storing the Gibbs output for posterior inference
  draws[iter, ] <- c(alpha, beta, gamma, sig2)
}

# Discard burn-in and thin, then split the chains by parameter.
draws <- draws[seq(M0 + 1, niter, by = thin), ]
alphas <- draws[, 1]
betas <- draws[, 2]
gammas <- draws[, 3]
sig2s <- draws[, 4]
# Some plots of the marginal posteriors ----
# 2x4 layout: top row trace plots, bottom row histograms with the true
# value (red) and the OLS estimate (green) overlaid.
par(mfrow = c(2, 4))
panel.main <- c(expression(alpha), expression(beta),
                expression(gamma), expression(sigma^2))
for (j in 1:4) {
  ts.plot(draws[, j], xlab = "iterations", ylab = "", main = panel.main[j])
}
true.vals <- c(alpha.t, beta.t, gamma.t, sig2.t)
ols.vals <- c(theta.ols, sig2.ols)
for (j in 1:4) {
  hist(draws[, j], xlab = "", prob = TRUE, main = "")
  abline(v = true.vals[j], col = 2)
  abline(v = ols.vals[j], col = 3)
}
pairs(draws, labels = c("alpha", "beta", "gamma", "sig2"))

# Predicting h steps ahead ----
# Each posterior draw generates its own forecast path, so the columns of
# yf are draws from the joint posterior predictive distribution.  The
# two most recent lags are rolled forward, seeded with y[n] and y[n-1].
h <- 10
yf <- matrix(0, h, M)
lag1 <- rep(y[n], M)
lag2 <- rep(y[n - 1], M)
for (k in 1:h) {
  yf[k, ] <- rnorm(M, alphas + betas * lag1 + gammas * lag2, sqrt(sig2s))
  lag2 <- lag1
  lag1 <- yf[k, ]
}
yf.mean <- apply(yf, 1, mean)
yf.se <- sqrt(apply(yf, 1, var))
qyf <- apply(yf, 1, quantile, c(0.025, 0.5, 0.975))

# Forecast fan: posterior mean +/- 2 s.e. (red) and the 2.5%/50%/97.5%
# posterior predictive quantiles (blue) appended after the data.
horizon <- (n + 1):(n + h)
par(mfrow = c(1, 1))
plot(y, type = "b", ylim = range(y, qyf), xlim = c(0, n + h),
     xlab = "time", ylab = "AR(2) data")
points(horizon, yf.mean, col = 2, pch = 16)
points(horizon, qyf[2, ], col = 4, pch = 16)
lines(horizon, yf.mean + 2 * yf.se, col = 2, lty = 2)
lines(horizon, yf.mean - 2 * yf.se, col = 2, lty = 2)
lines(horizon, qyf[1, ], col = 4, lty = 2)
lines(horizon, qyf[3, ], col = 4, lty = 2)