# Intra-daily realized volatility of Alcoa stock from January 2, 2003 to
# May 7, 2004 (340 observations).  The daily realized volatility used is
# the sum of squares of intraday 10-minute log returns measured in
# percentage; no overnight returns or first 10-minute intraday returns are
# used.  The series analyzed is the logarithm of the daily realized
# volatility.  Transactions data come from the TAQ database of the NYSE.
#
# Model: AR(1)-plus-noise dynamic linear model
#   y[t] = x[t] + v[t],          v[t] ~ N(0, V)
#   x[t] = phi * x[t-1] + w[t],  w[t] ~ N(0, W)

# Negative log-likelihood of the DLM via the Kalman-filter prediction-error
# decomposition.
# par = (phi, log V, log W): the variances enter on the log scale so the
# optimizer can search unconstrained.
# NOTE: reads the globals `y` (data) and `m0`, `C0` (prior state moments) --
# the usual pattern for an nlm() objective inside a script.
dlm.minuslike <- function(par) {
  phi <- par[1]
  V <- exp(par[2])
  W <- exp(par[3])
  n <- length(y)
  m <- m0
  C <- C0
  loglike <- 0.0
  for (t in seq_len(n)) {
    a <- phi * m                # one-step-ahead state mean
    R <- phi^2 * C + W          # one-step-ahead state variance
    # predictive density of y[t] is N(a, R + V)
    loglike <- loglike + dnorm(y[t], a, sqrt(R + V), log = TRUE)
    C <- 1 / (1 / R + 1 / V)    # filtered variance (precision form)
    m <- C * (a / R + y[t] / V) # filtered mean
  }
  return(-loglike)
}

# Kalman filter and fixed-interval (Rauch-Tung-Striebel) smoother.
# Arguments: data y, prior state mean m0 and variance c0, AR coefficient
# phi, observation variance V, state variance W.
# Returns a list with filtered moments (mf, Cf) and smoothed moments
# (mb, Cb).
dlm.kfks <- function(y, m0, c0, phi, V, W) {
  n <- length(y)
  mf <- rep(0, n)
  Cf <- rep(0, n)
  af <- rep(0, n)
  Rf <- rep(0, n)
  m <- m0
  # Fix: the original body ignored the `c0` argument and read the global
  # C0; the historical call site even passes an undefined `c0`, which only
  # avoided an error through lazy evaluation.  Use the argument when it can
  # be evaluated and fall back to the global C0 otherwise, so both old and
  # corrected call sites keep working.
  C <- tryCatch(c0, error = function(e) C0)
  for (t in seq_len(n)) {
    a <- phi * m
    R <- phi^2 * C + W
    C <- 1 / (1 / R + 1 / V)
    m <- C * (a / R + y[t] / V)
    af[t] <- a
    Rf[t] <- R
    Cf[t] <- C
    mf[t] <- m
  }
  mb <- rep(0, n)
  Cb <- rep(0, n)
  mb[n] <- mf[n]
  Cb[n] <- Cf[n]
  if (n > 1) {                  # guard: (n-1):1 would run c(0, 1) for n = 1
    for (t in seq(n - 1, 1)) {  # backward smoothing recursion
      B <- phi * Cf[t] / Rf[t + 1]
      Cb[t] <- Cf[t] - B^2 * (Rf[t + 1] - Cb[t + 1])
      mb[t] <- mf[t] + B * (mb[t + 1] - af[t + 1])
    }
  }
  return(list(mf = mf, Cf = Cf, mb = mb, Cb = Cb))
}

# Forward filtering, backward sampling: draws one trajectory of the latent
# states x[1:n] from p(x | y, phi, V, W).
ffbs <- function(y, m0, c0, phi, V, W) {
  n <- length(y)
  mf <- rep(0, n)
  Cf <- rep(0, n)
  m <- m0
  # Same defensive fallback for `c0` as in dlm.kfks (see comment there).
  C <- tryCatch(c0, error = function(e) C0)
  for (t in seq_len(n)) {
    a <- phi * m
    R <- phi^2 * C + W
    C <- 1 / (1 / R + 1 / V)
    m <- C * (a / R + y[t] / V)
    Cf[t] <- C
    mf[t] <- m
  }
  x <- rep(0, n)
  x[n] <- rnorm(1, mf[n], sqrt(Cf[n]))
  if (n > 1) {
    for (t in seq(n - 1, 1)) {
      # p(x[t] | x[t+1], y[1:t]) combines the filtered moments with the
      # state equation x[t+1] = phi * x[t] + w[t+1]
      var <- 1 / (1 / Cf[t] + phi^2 / W)
      mean <- var * (mf[t] / Cf[t] + phi * x[t + 1] / W)
      x[t] <- rnorm(1, mean, sqrt(var))
    }
  }
  return(x)
}

# Daily realized volatility series of Alcoa stock (log scale)
rvalcoa <- read.table("alcoa.txt", header = FALSE)
y <- log(rvalcoa[, 2])
n <- length(y)
par(mfrow = c(1, 1))
ts.plot(y)

# Initial state mean/variance
m0 <- 0
C0 <- 1

# MAXIMUM LIKELIHOOD ESTIMATION
###############################
# Nonlinear optimizer; starting values (phi, log V, log W) = (1, 0, 0)
par0 <- c(1, 0, 0)
dlm.mle <- nlm(dlm.minuslike, par0)
phi.hat <- dlm.mle$estimate[1]
V.hat <- exp(dlm.mle$estimate[2])
W.hat <- exp(dlm.mle$estimate[3])
# Kalman filter and Kalman smoother conditional on the ML estimates.
# Fix: pass the defined global C0 explicitly -- the original passed an
# undefined `c0`, which only avoided an error through lazy evaluation.
run <- dlm.kfks(y, m0, C0, phi.hat, V.hat, W.hat)
limf <- cbind(run$mf - 2 * sqrt(run$Cf), run$mf, run$mf + 2 * sqrt(run$Cf))
limb <- cbind(run$mb - 2 * sqrt(run$Cb), run$mb, run$mb + 2 * sqrt(run$Cb))
par(mfrow = c(1, 1))
plot(limf[, 2], ylim = range(limf, limb), ylab = "", type = "l",
     xlab = "Time")
lines(limf[, 1])
lines(limf[, 3])
lines(limb[, 1], col = 2)
lines(limb[, 2], col = 2)
lines(limb[, 3], col = 2)
legend("topright", legend = c("KF", "KS"), lty = 1, col = 1:2)

# BAYESIAN ESTIMATION VIA MARKOV CHAIN MONTE CARLO
##################################################
# Priors:
#   phi ~ N(phi0, D0)
#   V   ~ IG(n0 / 2,  n0 * V0 / 2)
#   W   ~ IG(nu0 / 2, nu0 * W0 / 2)
phi0 <- 0.95
D0 <- 100.0
n0 <- 2
V0 <- 1
nu0 <- 2
W0 <- 0.01

# Initial values: start the chain at the ML estimates
phi1 <- phi.hat
V1 <- V.hat
W1 <- W.hat

M0 <- 1000  # burn-in iterations
M <- 1000   # draws kept
L <- 1      # thinning interval
niter <- M0 + M * L
draws <- matrix(0, niter, n + 3)
for (iter in seq_len(niter)) {
  # Sample the states jointly by forward filtering, backward sampling
  x1 <- ffbs(y, m0, C0, phi1, V1, W1)
  # Sample phi | x, W: conjugate normal regression of x[t] on x[t-1]
  var <- 1 / (1 / D0 + sum(x1[1:(n - 1)]^2) / W1)
  mean <- var * (phi0 / D0 + sum(x1[1:(n - 1)] * x1[2:n]) / W1)
  phi1 <- rnorm(1, mean, sqrt(var))
  # Sample V | y, x: conjugate inverse gamma
  a <- n0 + n
  b <- n0 * V0 + sum((y - x1)^2)
  V1 <- 1 / rgamma(1, a / 2, b / 2)
  # Sample W | x, phi: conjugate inverse gamma
  a <- nu0 + (n - 1)
  b <- nu0 * W0 + sum((x1[2:n] - phi1 * x1[1:(n - 1)])^2)
  W1 <- 1 / rgamma(1, a / 2, b / 2)
  draws[iter, ] <- c(phi1, V1, W1, x1)
}

# Keep every L-th draw after burn-in
ind <- seq((M0 + 1), niter, by = L)
par(mfrow = c(2, 3))
ts.plot(draws[, 1], xlab = "iterations", ylab = "", main = expression(phi))
ts.plot(draws[, 2], xlab = "iterations", ylab = "", main = "V")
ts.plot(draws[, 3], xlab = "iterations", ylab = "", main = "W")
hist(draws[ind, 1], prob = TRUE, main = "", xlab = "")
abline(v = phi.hat, col = 2)
hist(draws[ind, 2], prob = TRUE, main = "", xlab = "")
abline(v = V.hat, col = 2)
hist(draws[ind, 3], prob = TRUE, main = "", xlab = "")
abline(v = W.hat, col = 2)

# Posterior 5%/50%/95% quantiles of the states vs. the ML-based smoother
qx <- t(apply(draws[ind, 4:(n + 3)], 2, quantile, c(0.05, 0.5, 0.95)))
par(mfrow = c(1, 1))
ts.plot(qx)                # Bayesian quantiles drawn in the default color
lines(limb[, 1], col = 2)  # ML-based smoother bands drawn in red
lines(limb[, 2], col = 2)
lines(limb[, 3], col = 2)
# Fix: legend labels were swapped relative to the plotted colors -- the
# Bayesian quantiles (ts.plot) are black (col 1) and the ML smoother is
# red (col 2).
legend("topright", legend = c("KS (BAYES)", "KS (MLE)"), lty = 1, col = 1:2)