1 AR(1) plus noise model

In this exercise, we consider the AR(1) plus noise model, which is a special case of the more general normal dynamic linear model we’ve just studied. What follows assumes that

\[\begin{eqnarray*} y_t &=& x_t + \epsilon_t \qquad\qquad \epsilon_t \sim N(0,\sigma^2)\\ x_t &=& \alpha + \beta x_{t-1} + \omega_t \qquad \omega_t \sim N(0,\tau^2) \end{eqnarray*}\] for \(t=1,\ldots,n\) and \(x_0 \sim N(m_0,C_0)\).

2 Simulating some data

# Simulate n observations from the AR(1) plus noise model:
#   y[t] = x[t] + e[t],                 e[t] ~ N(0, sig2)   (observation equation)
#   x[t] = alpha + beta*x[t-1] + w[t],  w[t] ~ N(0, tau2)   (state equation)
set.seed(3141593)
n     = 100
alpha = 0.0
beta  = 0.98
sig2  = 1.0
tau2  = 0.5
x0    = alpha/(1-beta)  # start the state at its stationary mean alpha/(1-beta)
# NOTE(review): e and w are generated but never used below -- the states and
# observations are drawn directly via rnorm(). Kept here so the seeded RNG
# stream (and hence the simulated data set) is unchanged.
e     = rnorm(n,0,sqrt(sig2))
w     = rnorm(n,0,sqrt(tau2))
y     = rep(0,n)
x     = rep(0,n)
x[1]  = rnorm(1,alpha+beta*x0,sqrt(tau2))
y[1]  = rnorm(1,x[1],sqrt(sig2))
for (t in 2:n){
  x[t] = rnorm(1,alpha+beta*x[t-1],sqrt(tau2))
  y[t] = rnorm(1,x[t],sqrt(sig2))
}
# Keep the simulated "truth" for later comparison against posterior draws.
x.true     = x
sig2.true  = sig2
tau2.true  = tau2
x0.true    = x0
alpha.true = alpha
beta.true  = beta

# Data y[t] as dots; latent state path x[t] in red.
par(mfrow=c(1,1))
plot(y,xlab="Time",ylab="",pch=16)
lines(x,col=2,type="b",pch=16)

3 Sampling the \(x_t\)s individually (Gibbs sampler)

We cycle through \(n+4\) full conditional distributions: one for each state \(x_0,x_1,\ldots,x_n\), one (bivariate) for the pair \((\alpha,\beta)\), and one each for \(\sigma^2\) and \(\tau^2\).

The prior for the time-invariant parameters, \(\alpha,\beta,\sigma^2\) and \(\tau^2\), are as follows \[\begin{eqnarray*} p(\alpha,\beta) &\propto& 1 \qquad \mbox{(non-informative prior)}\\ \sigma^2 &\sim& IG(\nu_0/2,\nu_0\sigma_0^2/2)\\ \tau^2 &\sim& IG(\eta_0/2,\eta_0\tau_0^2/2). \end{eqnarray*}\]

Therefore, the joint posterior distribution is given by \[ p(x_0,x_1,\ldots,x_n,\alpha,\beta,\sigma^2,\tau^2|y_1,\ldots,y_n) \propto p(\alpha,\beta)p(\sigma^2)p(\tau^2) p(x_0)\prod_{t=1}^n p(x_t|x_{t-1},\alpha,\beta,\tau^2) \prod_{t=1}^n p(y_t|x_t,\sigma^2). \] It is easy to see that the full conditionals can be written as \[\begin{eqnarray*} p(x_0|\mbox{all}) &\propto& p(x_0)p(x_1|x_0,\alpha,\beta,\tau^2)\\ p(x_t|\mbox{all}) &\propto& p(x_t|x_{t-1},\alpha,\beta,\tau^2)p(x_{t+1}|x_t,\alpha,\beta,\tau^2)p(y_t|x_t,\sigma^2) \qquad t=1,\ldots,n\\ p(\alpha,\beta|\mbox{all}) &\propto& \prod_{t=1}^n p(x_t|x_{t-1},\alpha,\beta,\tau^2)\\ p(\tau^2|\mbox{all}) &\propto& p(\tau^2)\prod_{t=1}^n p(x_t|x_{t-1},\alpha,\beta,\tau^2)\\ p(\sigma^2|\mbox{all}) &\propto& p(\sigma^2)\prod_{t=1}^n p(y_t|x_t,\sigma^2), \end{eqnarray*}\] all of which are univariate, bivariate or inverse-gamma distributions (for \(t=n\) the factor involving \(x_{t+1}\) is absent).

# Set up of prior hyperparameters
# Priors: x0 ~ N(m0,C0), sig2 ~ IG(nu0/2, nu0*sig20/2), tau2 ~ IG(eta0/2, eta0*tau20/2),
# and a flat (non-informative) prior on (alpha,beta).
m0        = 0
C0        = 100
nu0       = 10
sig20     = 1
eta0      = 10
tau20     = 1
nu0sig20  = nu0*sig20   # numerator of the sig2 prior rate
eta0tau20 = eta0*tau20  # numerator of the tau2 prior rate
nu0n      = (nu0+n)/2   # posterior shape for sig2, precomputed
eta0n     = (eta0+n)/2  # posterior shape for tau2, precomputed

# initial values
# Chains are started at the true simulation values.
x     = x.true
x0    = x0.true
alpha = alpha.true
beta  = beta.true
# MCMC set up
thin   = 1
burnin = 1000
M      = 1000
niter  = burnin+thin*M
# Each row stores one draw of (x[1:n], sig2, tau2, alpha, beta, x0).
draws  = matrix(0,niter,n+5)
for (iter in 1:niter){
  # Learning sig2
  # sig2 | all ~ IG((nu0+n)/2, (nu0*sig20 + sum((y-x)^2))/2)
  sig2 = 1/rgamma(1,nu0n,(nu0sig20+sum((y-x)^2))/2)
  # Learning tau2
  xx = c(x0,x[1:(n-1)])  # lagged states x[0], ..., x[n-1]
  # tau2 | all ~ IG((eta0+n)/2, (eta0*tau20 + sum of squared state residuals)/2)
  tau2 = 1/rgamma(1,eta0n,(eta0tau20+sum((x-alpha-beta*xx)^2))/2)
  # Learning (alpha,beta)
  # Flat prior => (alpha,beta) | all ~ N((X'X)^{-1}X'x, tau2*(X'X)^{-1}):
  # a Bayesian regression of x[t] on (1, x[t-1]).
  X = cbind(1,xx)
  v = solve(t(X)%*%X)
  m = v%*%t(X)%*%x
  # t(chol(v)) %*% N(0, tau2*I) has covariance tau2*v, as required.
  ab = m+t(chol(v))%*%rnorm(2,0,sqrt(tau2))
  alpha = ab[1]
  beta  = ab[2]
  # Learning x0
  # Combine the N(m0,C0) prior with x[1] ~ N(alpha + beta*x0, tau2).
  var = 1/(1/C0+beta^2/tau2)
  mean = var*(m0/C0+(x[1]-alpha)*beta/tau2)
  x0 = rnorm(1,mean,sqrt(var))
  # Learning x(1)
  # Full conditional combines the transitions x0 -> x1 and x1 -> x2 with y[1].
  var = 1/((1+beta^2)/tau2+1/sig2) 
  mean = var*(beta*(x[2]+x0)/tau2+alpha*(1-beta)/tau2+y[1]/sig2)
  x[1] = rnorm(1,mean,sqrt(var))
  # Learning x(2)....x(n-1)
  # All interior states share the same conditional variance, hoisted out of the loop.
  var = 1/((1+beta^2)/tau2+1/sig2) 
  for (t in 2:(n-1)){
    mean = var*(beta*(x[t+1]+x[t-1])/tau2+alpha*(1-beta)/tau2+y[t]/sig2)
    x[t] = rnorm(1,mean,sqrt(var))
  }
  # Learning x(n)
  # Last state: no x[n+1] factor, only the transition from x[n-1] and y[n].
  var = 1/(1/tau2+1/sig2) 
  mean = var*((alpha+beta*x[n-1])/tau2+y[n]/sig2)
  x[n] = rnorm(1,mean,sqrt(var))
  # Storing the draws
  draws[iter,] = c(x,sig2,tau2,alpha,beta,x0)
}
# Discard burn-in and thin the chain.
ind = seq(burnin+1,niter,by=thin)
draws = draws[ind,]


# One diagnostic row per static parameter: trace plot (with true value as a
# red horizontal line), ACF, and histogram (true value as a red vertical line).
par(mfrow=c(4,3))
par.cols   = n + 1:4
par.true   = c(sig2.true, tau2.true, alpha.true, beta.true)
par.labels = c(expression(sigma^2), expression(tau^2),
               expression(alpha), expression(beta))
for (j in 1:4){
  ts.plot(draws[, par.cols[j]], xlab="iterations", ylab="", main=par.labels[j])
  abline(h=par.true[j], col=2)
  acf(draws[, par.cols[j]], main="")
  hist(draws[, par.cols[j]], prob=TRUE, main="", xlab="")
  abline(v=par.true[j], col=2)
}

3.1 Autocorrelation functions of the Markov chains

# Overlay the autocorrelation function of every state chain x(t), t = 1..n.
state.acfs = lapply(1:n, function(j) acf(draws[, j], plot = FALSE)$acf)
plot(state.acfs[[1]], type = "l", xlab = "Lag", ylab = "ACF",
     col = grey(0.75), ylim = c(-0.2, 1))
for (j in 2:n){
  lines(state.acfs[[j]], col = grey(0.75))
}
title("ACF for each x(t)")
abline(h = 0, lty = 3)

3.2 Marginal quantiles from \(p(x_1,\ldots,x_n|y_1,\ldots,y_n)\)

# Pointwise posterior 2.5%, 50% and 97.5% quantiles of the states
# (green dashed / red solid / green dashed), with the true path in blue.
qx = t(apply(draws[, 1:n], 2, quantile, probs = c(0.025, 0.5, 0.975)))
ts.plot(qx, xlab = "Time", col = c(3, 2, 3), lty = c(2, 1, 2))
lines(x.true, col = 4)

4 Sampling \(x_t\)s jointly (FFBS)

The main difference between the current MCMC scheme (joint sampling the states) versus the previous MCMC scheme (individually sampling the states) is the fact that we can use the forward filtering and backward sampling scheme (FFBS) to sample from \[ p(x_1,\ldots,x_n|x_0,\alpha,\beta,\tau^2,\sigma^2,y_1,\ldots,y_n). \]

# initial values
# Chains are started at the true simulation values.
x     = x.true
x0    = x0.true
alpha = alpha.true
beta  = beta.true
# MCMC set up
thin   = 1
burnin = 5000
M      = 5000
niter  = burnin+thin*M
# Each row stores one draw of (x[1:n], sig2, tau2, alpha, beta, x0).
draws1 = matrix(0,niter,n+5)
mts    = rep(0,n)  # filtered means   m[t] = E(x[t] | y[1:t])
Cts    = rep(0,n)  # filtered variances C[t] = V(x[t] | y[1:t])
for (iter in 1:niter){
  # Learning sig2 (same full conditional as in the individual-sampling scheme)
  sig2 = 1/rgamma(1,nu0n,(nu0sig20+sum((y-x)^2))/2)
  # Learning tau2
  xx = c(x0,x[1:(n-1)])  # lagged states x[0], ..., x[n-1]
  tau2 = 1/rgamma(1,eta0n,(eta0tau20+sum((x-alpha-beta*xx)^2))/2)
  # Learning (alpha,beta): Bayesian regression of x[t] on (1, x[t-1]), flat prior
  X = cbind(1,xx)
  v = solve(t(X)%*%X)
  m = v%*%t(X)%*%x
  ab = m+t(chol(v))%*%rnorm(2,0,sqrt(tau2))
  alpha = ab[1]
  beta  = ab[2]
  # Learning x0: N(m0,C0) prior combined with x[1] ~ N(alpha + beta*x0, tau2)
  var  = 1/(1/C0+beta^2/tau2)
  mean = var*(m0/C0+(x[1]-alpha)*beta/tau2)
  x0   = rnorm(1,mean,sqrt(var))
  # Learning x1,...,xn
  # Forward filtering: standard Kalman filter recursions.
  mt = m0
  Ct = C0
  for (t in 1:n){
    at = alpha+beta*mt   # prior (predictive) mean of x[t] given y[1:(t-1)]
    Rt = beta^2*Ct+tau2  # prior variance
    Qt = Rt+sig2         # one-step-ahead predictive variance of y[t]
    At = Rt/Qt           # Kalman gain
    mt = (1-At)*at + At*y[t]
    Ct = Rt - At^2*Qt
    mts[t] = mt
    Cts[t] = Ct
  }  
  # Backward sampling: draw x[n] from its filtered distribution, then
  # x[t] | x[t+1], y[1:t] for t = n-1, ..., 1.
  x[n] = rnorm(1,mts[n],sqrt(Cts[n]))
  for (t in (n-1):1){
      v = 1/(beta^2/tau2+1/Cts[t])
      m = v*(beta*(x[t+1]-alpha)/tau2+mts[t]/Cts[t])
    x[t] = rnorm(1,m,sqrt(v))
    }
  # Storing the draws
  draws1[iter,] = c(x,sig2,tau2,alpha,beta,x0)
}
# Discard burn-in and thin the chain.
ind = seq(burnin+1,niter,by=thin)
draws1 = draws1[ind,]


# One diagnostic row per static parameter (FFBS draws): trace plot (true value
# as a red horizontal line), ACF, and histogram (true value as a red vertical line).
par(mfrow=c(4,3))
par.cols   = n + 1:4
par.true   = c(sig2.true, tau2.true, alpha.true, beta.true)
par.labels = c(expression(sigma^2), expression(tau^2),
               expression(alpha), expression(beta))
for (j in 1:4){
  ts.plot(draws1[, par.cols[j]], xlab="iterations", ylab="", main=par.labels[j])
  abline(h=par.true[j], col=2)
  acf(draws1[, par.cols[j]], main="")
  hist(draws1[, par.cols[j]], prob=TRUE, main="", xlab="")
  abline(v=par.true[j], col=2)
}

4.1 Autocorrelation functions of the Markov chains

# Overlay the autocorrelation function of every FFBS state chain x(t), t = 1..n.
state.acfs = lapply(1:n, function(j) acf(draws1[, j], plot = FALSE)$acf)
plot(state.acfs[[1]], type = "l", xlab = "Lag", ylab = "ACF",
     col = grey(0.75), ylim = c(-0.2, 1))
for (j in 2:n){
  lines(state.acfs[[j]], col = grey(0.75))
}
title("ACF for each x(t)")
abline(h = 0, lty = 3)

4.2 Marginal quantiles from \(p(x_1,\ldots,x_n|y_1,\ldots,y_n)\)

# Pointwise posterior 2.5%, 50% and 97.5% quantiles of the states (FFBS draws),
# with the true state path overlaid in blue.
qx = t(apply(draws1[, 1:n], 2, quantile, probs = c(0.025, 0.5, 0.975)))
ts.plot(qx, xlab = "Time", col = c(3, 2, 3), lty = c(2, 1, 2))
lines(x.true, col = 4)

5 Comparing both MCMC schemes

# Side-by-side comparison of the two MCMC schemes.
par(mfrow=c(3,2))
# ACFs of the n state chains: individual sampling in black, joint (FFBS) in grey.
# BUG FIX: previously the first individual-sampling curve was drawn in grey
# (contradicting the legend, which maps black to "Individual sampling"), and
# the first FFBS chain (draws1[,1]) was never drawn (its loop started at 2).
acf1 = acf(draws[,1],plot=FALSE)
plot(acf1$acf,type="l",xlab="Lag",ylab="ACF",col=1,ylim=c(-0.2,1))
for (i in 2:n){
  acf1 = acf(draws[,i],plot=FALSE)
  lines(acf1$acf)
}
for (i in 1:n){
  acf1 = acf(draws1[,i],plot=FALSE)
  lines(acf1$acf,col=grey(0.75))
}
abline(h=0,lty=3)
legend("topright",legend=c("Individual sampling","Joint sampling"),col=c(1,grey(0.75)),lty=1,lwd=2,bty="n")

# Posterior state quantiles (2.5%, 50%, 97.5%) under each scheme.
qx1 = t(apply(draws[,1:n],2,quantile,c(0.025,0.5,0.975)))
qx2 = t(apply(draws1[,1:n],2,quantile,c(0.025,0.5,0.975)))
ts.plot(qx1,xlab="Time")
for (i in 1:3)
  lines(qx2[,i],col=grey(0.75))

# Marginal posterior densities of the static parameters under each scheme
# (individual sampling in black, FFBS in grey).
plot(density(draws[,n+1]),xlab="",main=expression(sigma^2))
lines(density(draws1[,n+1]),col=grey(0.75))
plot(density(draws[,n+2]),xlab="",main=expression(tau^2))
lines(density(draws1[,n+2]),col=grey(0.75))
plot(density(draws[,n+3]),xlab="",main=expression(alpha))
lines(density(draws1[,n+3]),col=grey(0.75))
plot(density(draws[,n+4]),xlab="",main=expression(beta))
lines(density(draws1[,n+4]),col=grey(0.75))

---
title: "AR(1) plus noise model"
author: "Hedibert Freitas Lopes"
date: "`r Sys.Date()`"
output:
  html_document:
    theme: paper
    highlight: pygments
    toc: true
    toc_depth: 3
    toc_collapsed: true
#    toc_float: true
    code_download: true
    number_sections: true
---
  


# AR(1) plus noise model

In this exercise, we consider the AR(1) plus noise model, which is a special case of the more general normal dynamic linear model we've just studied.  What follows assumes that 

\begin{eqnarray*}
y_t &=& x_t + \epsilon_t             \qquad\qquad    \epsilon_t \sim N(0,\sigma^2)\\
x_t &=& \alpha + \beta x_{t-1} + \omega_t  \qquad  \omega_t \sim N(0,\tau^2)
\end{eqnarray*}
for $t=1,\ldots,n$ and $x_0 \sim N(m_0,C_0)$.


# Simulating some data

```{r fig.align='center', fig.width=7, fig.height=5}
# Simulate n observations from the AR(1) plus noise model:
#   y[t] = x[t] + e[t],                 e[t] ~ N(0, sig2)   (observation equation)
#   x[t] = alpha + beta*x[t-1] + w[t],  w[t] ~ N(0, tau2)   (state equation)
set.seed(3141593)
n     = 100
alpha = 0.0
beta  = 0.98
sig2  = 1.0
tau2  = 0.5
x0    = alpha/(1-beta)  # start the state at its stationary mean alpha/(1-beta)
# NOTE(review): e and w are generated but never used below -- the states and
# observations are drawn directly via rnorm(). Kept here so the seeded RNG
# stream (and hence the simulated data set) is unchanged.
e     = rnorm(n,0,sqrt(sig2))
w     = rnorm(n,0,sqrt(tau2))
y     = rep(0,n)
x     = rep(0,n)
x[1]  = rnorm(1,alpha+beta*x0,sqrt(tau2))
y[1]  = rnorm(1,x[1],sqrt(sig2))
for (t in 2:n){
  x[t] = rnorm(1,alpha+beta*x[t-1],sqrt(tau2))
  y[t] = rnorm(1,x[t],sqrt(sig2))
}
# Keep the simulated "truth" for later comparison against posterior draws.
x.true     = x
sig2.true  = sig2
tau2.true  = tau2
x0.true    = x0
alpha.true = alpha
beta.true  = beta

# Data y[t] as dots; latent state path x[t] in red.
par(mfrow=c(1,1))
plot(y,xlab="Time",ylab="",pch=16)
lines(x,col=2,type="b",pch=16)
```

# Sampling the $x_t$s individually (Gibbs sampler)
We cycle through $n+4$ full conditional distributions: one for each state $x_0,x_1,\ldots,x_n$, one (bivariate) for the pair $(\alpha,\beta)$, and one each for $\sigma^2$ and $\tau^2$.

The prior for the time-invariant parameters, $\alpha,\beta,\sigma^2$ and $\tau^2$, are as follows
\begin{eqnarray*}
p(\alpha,\beta) &\propto& 1 \qquad \mbox{(non-informative prior)}\\
\sigma^2 &\sim& IG(\nu_0/2,\nu_0\sigma_0^2/2)\\
\tau^2 &\sim& IG(\eta_0/2,\eta_0\tau_0^2/2).
\end{eqnarray*}

Therefore, the joint posterior distribution is given by
$$
p(x_0,x_1,\ldots,x_n,\alpha,\beta,\sigma^2,\tau^2|y_1,\ldots,y_n) \propto p(\alpha,\beta)p(\sigma^2)p(\tau^2)
p(x_0)\prod_{t=1}^n p(x_t|x_{t-1},\alpha,\beta,\tau^2) \prod_{t=1}^n p(y_t|x_t,\sigma^2).
$$
It is easy to see that the full conditionals can be written as
\begin{eqnarray*}
p(x_0|\mbox{all}) &\propto& p(x_0)p(x_1|x_0,\alpha,\beta,\tau^2)\\
p(x_t|\mbox{all}) &\propto& p(x_t|x_{t-1},\alpha,\beta,\tau^2)p(x_{t+1}|x_t,\alpha,\beta,\tau^2)p(y_t|x_t,\sigma^2) \qquad t=1,\ldots,n\\
p(\alpha,\beta|\mbox{all}) &\propto& \prod_{t=1}^n p(x_t|x_{t-1},\alpha,\beta,\tau^2)\\
p(\tau^2|\mbox{all}) &\propto& p(\tau^2)\prod_{t=1}^n p(x_t|x_{t-1},\alpha,\beta,\tau^2)\\
p(\sigma^2|\mbox{all}) &\propto& p(\sigma^2)\prod_{t=1}^n p(y_t|x_t,\sigma^2),
\end{eqnarray*}
all of which are univariate, bivariate or inverse-gamma distributions.

```{r fig.align='center', fig.width=8, fig.height=12}
# Set up of prior hyperparameters
# Priors: x0 ~ N(m0,C0), sig2 ~ IG(nu0/2, nu0*sig20/2), tau2 ~ IG(eta0/2, eta0*tau20/2),
# and a flat (non-informative) prior on (alpha,beta).
m0        = 0
C0        = 100
nu0       = 10
sig20     = 1
eta0      = 10
tau20     = 1
nu0sig20  = nu0*sig20   # numerator of the sig2 prior rate
eta0tau20 = eta0*tau20  # numerator of the tau2 prior rate
nu0n      = (nu0+n)/2   # posterior shape for sig2, precomputed
eta0n     = (eta0+n)/2  # posterior shape for tau2, precomputed

# initial values
# Chains are started at the true simulation values.
x     = x.true
x0    = x0.true
alpha = alpha.true
beta  = beta.true
# MCMC set up
thin   = 1
burnin = 1000
M      = 1000
niter  = burnin+thin*M
# Each row stores one draw of (x[1:n], sig2, tau2, alpha, beta, x0).
draws  = matrix(0,niter,n+5)
for (iter in 1:niter){
  # Learning sig2
  # sig2 | all ~ IG((nu0+n)/2, (nu0*sig20 + sum((y-x)^2))/2)
  sig2 = 1/rgamma(1,nu0n,(nu0sig20+sum((y-x)^2))/2)
  # Learning tau2
  xx = c(x0,x[1:(n-1)])  # lagged states x[0], ..., x[n-1]
  # tau2 | all ~ IG((eta0+n)/2, (eta0*tau20 + sum of squared state residuals)/2)
  tau2 = 1/rgamma(1,eta0n,(eta0tau20+sum((x-alpha-beta*xx)^2))/2)
  # Learning (alpha,beta)
  # Flat prior => (alpha,beta) | all ~ N((X'X)^{-1}X'x, tau2*(X'X)^{-1}):
  # a Bayesian regression of x[t] on (1, x[t-1]).
  X = cbind(1,xx)
  v = solve(t(X)%*%X)
  m = v%*%t(X)%*%x
  # t(chol(v)) %*% N(0, tau2*I) has covariance tau2*v, as required.
  ab = m+t(chol(v))%*%rnorm(2,0,sqrt(tau2))
  alpha = ab[1]
  beta  = ab[2]
  # Learning x0
  # Combine the N(m0,C0) prior with x[1] ~ N(alpha + beta*x0, tau2).
  var = 1/(1/C0+beta^2/tau2)
  mean = var*(m0/C0+(x[1]-alpha)*beta/tau2)
  x0 = rnorm(1,mean,sqrt(var))
  # Learning x(1)
  # Full conditional combines the transitions x0 -> x1 and x1 -> x2 with y[1].
  var = 1/((1+beta^2)/tau2+1/sig2) 
  mean = var*(beta*(x[2]+x0)/tau2+alpha*(1-beta)/tau2+y[1]/sig2)
  x[1] = rnorm(1,mean,sqrt(var))
  # Learning x(2)....x(n-1)
  # All interior states share the same conditional variance, hoisted out of the loop.
  var = 1/((1+beta^2)/tau2+1/sig2) 
  for (t in 2:(n-1)){
    mean = var*(beta*(x[t+1]+x[t-1])/tau2+alpha*(1-beta)/tau2+y[t]/sig2)
    x[t] = rnorm(1,mean,sqrt(var))
  }
  # Learning x(n)
  # Last state: no x[n+1] factor, only the transition from x[n-1] and y[n].
  var = 1/(1/tau2+1/sig2) 
  mean = var*((alpha+beta*x[n-1])/tau2+y[n]/sig2)
  x[n] = rnorm(1,mean,sqrt(var))
  # Storing the draws
  draws[iter,] = c(x,sig2,tau2,alpha,beta,x0)
}
# Discard burn-in and thin the chain.
ind = seq(burnin+1,niter,by=thin)
draws = draws[ind,]


# One diagnostic row per static parameter: trace plot (with true value as a
# red horizontal line), ACF, and histogram (true value as a red vertical line).
par(mfrow=c(4,3))
par.cols   = n + 1:4
par.true   = c(sig2.true, tau2.true, alpha.true, beta.true)
par.labels = c(expression(sigma^2), expression(tau^2),
               expression(alpha), expression(beta))
for (j in 1:4){
  ts.plot(draws[, par.cols[j]], xlab="iterations", ylab="", main=par.labels[j])
  abline(h=par.true[j], col=2)
  acf(draws[, par.cols[j]], main="")
  hist(draws[, par.cols[j]], prob=TRUE, main="", xlab="")
  abline(v=par.true[j], col=2)
}
```

## Autocorrelation functions of the Markov chains

```{r fig.align='center', fig.width=8, fig.height=5}
# Overlay the autocorrelation function of every state chain x(t), t = 1..n.
state.acfs = lapply(1:n, function(j) acf(draws[, j], plot = FALSE)$acf)
plot(state.acfs[[1]], type = "l", xlab = "Lag", ylab = "ACF",
     col = grey(0.75), ylim = c(-0.2, 1))
for (j in 2:n){
  lines(state.acfs[[j]], col = grey(0.75))
}
title("ACF for each x(t)")
abline(h = 0, lty = 3)
```


## Marginal quantiles from $p(x_1,\ldots,x_n|y_1,\ldots,y_n)$

```{r fig.align='center', fig.width=8, fig.height=5}
# Pointwise posterior 2.5%, 50% and 97.5% quantiles of the states
# (green dashed / red solid / green dashed), with the true path in blue.
qx = t(apply(draws[, 1:n], 2, quantile, probs = c(0.025, 0.5, 0.975)))
ts.plot(qx, xlab = "Time", col = c(3, 2, 3), lty = c(2, 1, 2))
lines(x.true, col = 4)
```

# Sampling $x_t$s jointly (FFBS)

The main difference between the current MCMC scheme (joint sampling the states) versus the previous MCMC scheme (individually sampling the states) is the fact that we can use the forward filtering and backward sampling scheme (FFBS) to sample from
$$
p(x_1,\ldots,x_n|x_0,\alpha,\beta,\tau^2,\sigma^2,y_1,\ldots,y_n).
$$

```{r fig.align='center', fig.width=8, fig.height=12}
# initial values
# Chains are started at the true simulation values.
x     = x.true
x0    = x0.true
alpha = alpha.true
beta  = beta.true
# MCMC set up
thin   = 1
burnin = 5000
M      = 5000
niter  = burnin+thin*M
# Each row stores one draw of (x[1:n], sig2, tau2, alpha, beta, x0).
draws1 = matrix(0,niter,n+5)
mts    = rep(0,n)  # filtered means   m[t] = E(x[t] | y[1:t])
Cts    = rep(0,n)  # filtered variances C[t] = V(x[t] | y[1:t])
for (iter in 1:niter){
  # Learning sig2 (same full conditional as in the individual-sampling scheme)
  sig2 = 1/rgamma(1,nu0n,(nu0sig20+sum((y-x)^2))/2)
  # Learning tau2
  xx = c(x0,x[1:(n-1)])  # lagged states x[0], ..., x[n-1]
  tau2 = 1/rgamma(1,eta0n,(eta0tau20+sum((x-alpha-beta*xx)^2))/2)
  # Learning (alpha,beta): Bayesian regression of x[t] on (1, x[t-1]), flat prior
  X = cbind(1,xx)
  v = solve(t(X)%*%X)
  m = v%*%t(X)%*%x
  ab = m+t(chol(v))%*%rnorm(2,0,sqrt(tau2))
  alpha = ab[1]
  beta  = ab[2]
  # Learning x0: N(m0,C0) prior combined with x[1] ~ N(alpha + beta*x0, tau2)
  var  = 1/(1/C0+beta^2/tau2)
  mean = var*(m0/C0+(x[1]-alpha)*beta/tau2)
  x0   = rnorm(1,mean,sqrt(var))
  # Learning x1,...,xn
  # Forward filtering: standard Kalman filter recursions.
  mt = m0
  Ct = C0
  for (t in 1:n){
    at = alpha+beta*mt   # prior (predictive) mean of x[t] given y[1:(t-1)]
    Rt = beta^2*Ct+tau2  # prior variance
    Qt = Rt+sig2         # one-step-ahead predictive variance of y[t]
    At = Rt/Qt           # Kalman gain
    mt = (1-At)*at + At*y[t]
    Ct = Rt - At^2*Qt
    mts[t] = mt
    Cts[t] = Ct
  }  
  # Backward sampling: draw x[n] from its filtered distribution, then
  # x[t] | x[t+1], y[1:t] for t = n-1, ..., 1.
  x[n] = rnorm(1,mts[n],sqrt(Cts[n]))
  for (t in (n-1):1){
  	  v = 1/(beta^2/tau2+1/Cts[t])
  	  m = v*(beta*(x[t+1]-alpha)/tau2+mts[t]/Cts[t])
    x[t] = rnorm(1,m,sqrt(v))
  	}
  # Storing the draws
  draws1[iter,] = c(x,sig2,tau2,alpha,beta,x0)
}
# Discard burn-in and thin the chain.
ind = seq(burnin+1,niter,by=thin)
draws1 = draws1[ind,]


# One diagnostic row per static parameter (FFBS draws): trace plot (true value
# as a red horizontal line), ACF, and histogram (true value as a red vertical line).
par(mfrow=c(4,3))
par.cols   = n + 1:4
par.true   = c(sig2.true, tau2.true, alpha.true, beta.true)
par.labels = c(expression(sigma^2), expression(tau^2),
               expression(alpha), expression(beta))
for (j in 1:4){
  ts.plot(draws1[, par.cols[j]], xlab="iterations", ylab="", main=par.labels[j])
  abline(h=par.true[j], col=2)
  acf(draws1[, par.cols[j]], main="")
  hist(draws1[, par.cols[j]], prob=TRUE, main="", xlab="")
  abline(v=par.true[j], col=2)
}
```

## Autocorrelation functions of the Markov chains

```{r fig.align='center', fig.width=8, fig.height=5}
# Overlay the autocorrelation function of every FFBS state chain x(t), t = 1..n.
state.acfs = lapply(1:n, function(j) acf(draws1[, j], plot = FALSE)$acf)
plot(state.acfs[[1]], type = "l", xlab = "Lag", ylab = "ACF",
     col = grey(0.75), ylim = c(-0.2, 1))
for (j in 2:n){
  lines(state.acfs[[j]], col = grey(0.75))
}
title("ACF for each x(t)")
abline(h = 0, lty = 3)
```

## Marginal quantiles from $p(x_1,\ldots,x_n|y_1,\ldots,y_n)$

```{r fig.align='center', fig.width=8, fig.height=5}
# Pointwise posterior 2.5%, 50% and 97.5% quantiles of the states (FFBS draws),
# with the true state path overlaid in blue.
qx = t(apply(draws1[, 1:n], 2, quantile, probs = c(0.025, 0.5, 0.975)))
ts.plot(qx, xlab = "Time", col = c(3, 2, 3), lty = c(2, 1, 2))
lines(x.true, col = 4)
```


# Comparing both MCMC schemes

```{r fig.align='center', fig.width=8, fig.height=10}
# Side-by-side comparison of the two MCMC schemes.
par(mfrow=c(3,2))
# ACFs of the n state chains: individual sampling in black, joint (FFBS) in grey.
# BUG FIX: previously the first individual-sampling curve was drawn in grey
# (contradicting the legend, which maps black to "Individual sampling"), and
# the first FFBS chain (draws1[,1]) was never drawn (its loop started at 2).
acf1 = acf(draws[,1],plot=FALSE)
plot(acf1$acf,type="l",xlab="Lag",ylab="ACF",col=1,ylim=c(-0.2,1))
for (i in 2:n){
  acf1 = acf(draws[,i],plot=FALSE)
  lines(acf1$acf)
}
for (i in 1:n){
  acf1 = acf(draws1[,i],plot=FALSE)
  lines(acf1$acf,col=grey(0.75))
}
abline(h=0,lty=3)
legend("topright",legend=c("Individual sampling","Joint sampling"),col=c(1,grey(0.75)),lty=1,lwd=2,bty="n")

# Posterior state quantiles (2.5%, 50%, 97.5%) under each scheme.
qx1 = t(apply(draws[,1:n],2,quantile,c(0.025,0.5,0.975)))
qx2 = t(apply(draws1[,1:n],2,quantile,c(0.025,0.5,0.975)))
ts.plot(qx1,xlab="Time")
for (i in 1:3)
  lines(qx2[,i],col=grey(0.75))

# Marginal posterior densities of the static parameters under each scheme
# (individual sampling in black, FFBS in grey).
plot(density(draws[,n+1]),xlab="",main=expression(sigma^2))
lines(density(draws1[,n+1]),col=grey(0.75))
plot(density(draws[,n+2]),xlab="",main=expression(tau^2))
lines(density(draws1[,n+2]),col=grey(0.75))
plot(density(draws[,n+3]),xlab="",main=expression(alpha))
lines(density(draws1[,n+3]),col=grey(0.75))
plot(density(draws[,n+4]),xlab="",main=expression(beta))
lines(density(draws1[,n+4]),col=grey(0.75))
```

