############################################################################################
#
#  AR(1) PLUS NOISE MODEL - PARAMETER LEARNING
#
############################################################################################
#
# HEDIBERT FREITAS LOPES
# Associate Professor of Econometrics and Statistics
# The University of Chicago Booth School of Business
# 5807 South Woodlawn Avenue
# Chicago, Illinois, 60637
# Email : hlopes@ChicagoBooth.edu
# URL: http://faculty.chicagobooth.edu/hedibert.lopes/research/
#
############################################################################################

# NOTE(review): clearing the global workspace inside a script is discouraged,
# but it is kept here to preserve the original behavior.
rm(list = ls())

# Subroutines file defines DLM.SIM, MCMC, LW and PL used below.
source("ar1plusnoise-parameterlearning-subroutines.R")

# Simulate a first-order DLM (AR(1) plus noise) ----
# Model (as implied by the parameters fed to DLM.SIM):
#   observation noise sd = sig, state evolution sd = tau,
#   initial state x0 set to the stationary mean alpha / (1 - beta).
set.seed(1234)
n     <- 100
sig2  <- 1.00            # observation variance
tau2  <- 0.05            # state-evolution variance
alpha <- 0.05            # state intercept
beta  <- 0.95            # AR(1) coefficient
x0    <- alpha / (1 - beta)
tau   <- sqrt(tau2)
sig   <- sqrt(sig2)

sim <- DLM.SIM(n, tau, sig, x0)
y   <- sim$y             # simulated observations
x   <- sim$x             # simulated latent states

# Prior hyperparameters ----
m0  <- x0                # prior mean for the initial state
C0  <- 1                 # prior variance for the initial state
a0  <- c(0, 1)           # prior mean for (alpha, beta)
A0  <- diag(0.1, 2) / tau2
iA  <- solve(A0)
iAa <- iA %*% a0

# ------------------------------------------------------------------------
# Other settings for closed form solution, MCMC, SMC, etc.
# ------------------------------------------------------------------------
# Monte Carlo settings ----
M     <- 1000   # MCMC draws kept / particle-filter particles
M1    <- 1000   # particles for PF with parameter learning
M2    <- 1000   # smoothing via PF
N     <- 50
niter <- 2 * M  # total MCMC iterations
# NOTE(review): `sd` and (below) `ts` mask stats::sd / stats::ts for data
# lookups; kept unchanged since the sourced subroutines may read them as
# globals, and R's function-call lookup still finds the stats functions.
sd    <- 0.05
delta <- 0.99   # discount factor for the Liu & West filter

# Brute-force sequential MCMC ----
# Re-run the full MCMC on y[1:t] at every 10th observation and keep the
# (5%, 50%, 95%) quantiles of x[t] (slot 1) and of the two trailing output
# columns (slots 2 and 3, plotted later as alpha and beta).
set.seed(121654)
ts <- seq(10, n, by = 10)
nt <- length(ts)
qmcmc.seq <- array(0, c(nt, 3, 3))
probs <- c(0.05, 0.5, 0.95)
for (t in seq_len(nt)) {
  print(t)
  mcmc.seq <- MCMC(y[1:ts[t]], alpha, beta, sig2, tau2,
                   a0, A0, m0, C0, M, niter)
  qmcmc.seq[t, 1, ] <- quantile(mcmc.seq[, ts[t]],     probs)
  qmcmc.seq[t, 2, ] <- quantile(mcmc.seq[, ts[t] + 1], probs)
  qmcmc.seq[t, 3, ] <- quantile(mcmc.seq[, ts[t] + 2], probs)
}

# Particle filters: Liu & West (LW) versus Particle Learning (PL) ----
set.seed(2304)
lw <- LW(y, sig2, tau2, a0, A0, m0, C0, M1, delta)
pl <- PL(y, sig2, tau2, a0, A0, m0, C0, M1)

# Plot 1: filtered quantiles of the state x[t] ----
par(mfrow = c(1, 1))
qx.lw <- t(apply(lw[, , 1], 1, quantile, probs))
qx.pl <- t(apply(pl[, , 1], 1, quantile, probs))
ts.plot(qx.pl, lwd = 2, ylim = range(qx.pl, qmcmc.seq[, 1, ]),
        col = grey(0.5), ylab = expression(x[t]))
for (j in 1:3) lines(qx.lw[, j], lty = 2)
for (i in 1:3) points(ts, qmcmc.seq[, 1, i], pch = 16, cex = 1.5)
legend(n / 2, max(qx.pl), legend = c("MCMC", "LW", "PL"), bty = "n",
       lty = c(0, 2, 1), pch = c(16, NA_integer_, NA_integer_), cex = 1.5)

# Plot 2: sequential posterior quantiles of alpha ----
par(mfrow = c(1, 1))
qx.lw <- t(apply(lw[, , 2], 1, quantile, probs))
qx.pl <- t(apply(pl[, , 2], 1, quantile, probs))
ts.plot(qx.pl, ylim = range(qx.lw, qx.pl), main = "",
        ylab = expression(alpha))
for (j in 1:3) lines(qx.lw[, j], lty = 2)
for (j in 1:3) points(ts, qmcmc.seq[, 2, j], pch = 16, cex = 1.5)
legend(n / 2, max(qx.pl), legend = c("MCMC", "LW", "PL"), bty = "n",
       lty = c(0, 2, 1), pch = c(16, NA_integer_, NA_integer_), cex = 1.5)

# Plot 3: sequential posterior quantiles of beta ----
par(mfrow = c(1, 1))
qx.lw <- t(apply(lw[, , 3], 1, quantile, probs))
qx.pl <- t(apply(pl[, , 3], 1, quantile, probs))
ts.plot(qx.pl, ylim = range(qx.lw, qx.pl), main = "",
        ylab = expression(beta))
for (j in 1:3) lines(qx.lw[, j], lty = 2)
for (j in 1:3) points(ts, qmcmc.seq[, 3, j], pch = 16, cex = 1.5)
legend(n / 2, max(qx.pl), legend = c("MCMC", "LW", "PL"), bty = "n",
       lty = c(0, 2, 1), pch = c(16, NA_integer_, NA_integer_), cex = 1.5)