---
title: "Regression Model With Autocorrelated Errors"
subtitle: "Based on Zellner and Tiao (1964) JASA paper"
author: "Hedibert Freitas Lopes"
date: "2/3/2022"
output:
  html_document:
    toc: true
    toc_depth: 2
    code_download: yes
    number_sections: true
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```

# The model
In this illustration we consider a simple Gaussian linear regression model whose errors follow a first-order autoregressive structure: for $t=1,\ldots,n$,
\begin{eqnarray*}
y_t &=& \beta x_t + u_t\\
u_t &=& \rho u_{t-1} + \epsilon_t
\end{eqnarray*}
where the $\epsilon_t$ follow a Gaussian white noise process with variance $\sigma^2$.  This model is studied, among various extensions, in Zellner and Tiao (1964) Bayesian Analysis of the Regression Model With Autocorrelated Errors, *Journal of the American Statistical Association (JASA)*, 59, 763-778.  The paper can be downloaded freely from either
https://www.jstor.org/stable/2283097 or https://doi.org/10.2307/2283097.


# The data
In one of their exercises, $\beta=3$, $\sigma^2=1$ and $\rho=0.5$ (case I) or $\rho=1.25$ (case II), and $n=16$.  The $x$'s are rescaled investment expenditures taken from Haavelmo (1947) Methods of measuring the marginal propensity to consume, *JASA*, 42, 105-122, which can be downloaded freely from either https://www.jstor.org/stable/2280191 or https://doi.org/10.2307/2280191.

```{r fig.width=10, fig.height=7}
# Rescaled investment expenditures from Haavelmo (1947)
x=c(3.0,3.9,6.0,4.2,5.2,4.7,5.1,4.5,6.0,3.9,4.1,2.2,1.7,2.7,3.3,4.8)
# Case I response (generated with rho = 0.5)
y1=c(9.500,12.649,18.794,12.198,14.372,13.909,14.556,14.700,18.281,
     13.890,10.318,5.473,4.044,6.361,7.036,13.368)
# Case II response (generated with rho = 1.25)
y2=c(9.500,13.024,19.975,14.270,16.760,15.923,16.931,17.111,22.195,
     18.992,18.338,14.012,13.873,17.855,20.099,27.549)
n = length(x)

par(mfrow=c(2,3))
plot(x,y1,type="n",col=0)
text(x,y1,label=1:n,col='blue')
ts.plot(y1-3*x)
acf(y1-3*x)

plot(x,y2,type="n",col=0)
text(x,y2,label=1:n,col='blue')
ts.plot(y2-3*x)
acf(y2-3*x)
```
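
As a quick side illustration, one replicate of a case I dataset can be simulated from the model above using the $x$'s just loaded; the seed and the initialization of $u_1$ at its stationary distribution are arbitrary choices made purely for this sketch.

```{r fig.width=6, fig.height=5}
# Simulate one case I replicate (beta=3, sigma2=1, rho=0.5) from the AR(1)-error model.
# The seed and the stationary initialization of u[1] are arbitrary choices.
set.seed(1234)
beta.true = 3; rho.true = 0.5; sig.true = 1
u    = rep(0,n)
u[1] = rnorm(1,0,sig.true/sqrt(1-rho.true^2))
for (t in 2:n)
  u[t] = rho.true*u[t-1] + rnorm(1,0,sig.true)
y1.sim = beta.true*x + u
plot(x,y1,col='blue',pch=16,ylim=range(y1,y1.sim),xlab="x",ylab="y")
points(x,y1.sim,pch=1)
legend("topleft",legend=c("observed y1","simulated replicate"),pch=c(16,1),col=c('blue',1),bty="n")
```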


# Simple OLS fits

```{r}
fit1 = lm(y1~x)
summary(fit1)

fit2 = lm(y2~x)
summary(fit2)
```

## Residual plots of the OLS fits


```{r fig.width=8, fig.height=5}
u1 = residuals(fit1)
u2 = residuals(fit2)
par(mfrow=c(1,2))
plot(u1,type="b")
plot(u2,type="b")
```


## Fitting AR(1) to the residuals
```{r}
summary(lm(u1[2:n]~u1[1:(n-1)]))
summary(lm(u2[2:n]~u2[1:(n-1)]))
```
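
A complementary check is a Ljung-Box test (base R's `Box.test`) on the OLS residuals; it should point in the same direction as the AR(1) fits above, with clear evidence of autocorrelation only for the case II residuals.

```{r}
# Ljung-Box test for first-order autocorrelation in the OLS residuals
Box.test(u1, lag = 1, type = "Ljung-Box")
Box.test(u2, lag = 1, type = "Ljung-Box")
```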

# Likelihood and prior
It is easy to see that
\begin{eqnarray*}
y_t &=& \beta x_t + u_t\\
\rho y_{t-1} &=& \rho \beta x_{t-1} + \rho u_{t-1},
\end{eqnarray*}
so that, subtracting the second equation from the first and using $u_t - \rho u_{t-1} = \epsilon_t$,
$$
y_t - \rho y_{t-1} = \beta (x_t-\rho x_{t-1}) + \epsilon_t.
$$
So, for the sake of the exercise, assume that the likelihood is conditional on $y_1$ and $x_1$.
Therefore
$$
p(y_{2:n}|x_{2:n},\theta) \propto (\sigma^2)^{-(n-1)/2}\exp\left\{-\frac{1}{2\sigma^2}
\sum_{t=2}^n (y_t - \rho y_{t-1} - \beta (x_t-\rho x_{t-1}))^2
\right\},
$$
and assume that the prior for $\theta=(\beta,\rho,\sigma^2)$ is 
$$
p(\beta,\rho,\sigma^2) \propto \frac{1}{\sigma^2}.
$$
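
For a fixed $\rho$, the transformed model is simply a no-intercept regression of $y_t-\rho y_{t-1}$ on $x_t-\rho x_{t-1}$. A quick sketch with $\rho$ held at the true case I value of $0.5$, purely for illustration:

```{r}
# Quasi-differenced regression with rho fixed at the true case I value (illustration only)
rho.fix = 0.5
ytilde  = y1[2:n] - rho.fix*y1[1:(n-1)]
xtilde  = x[2:n]  - rho.fix*x[1:(n-1)]
summary(lm(ytilde ~ xtilde - 1))
```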

# Contours of $p(\beta,\rho|\mbox{data})$

```{r}
# Unnormalized log-posterior: Gaussian log-likelihood of y_{2:n} (conditional on y_1)
# plus the log-prior term -2*log(sig), i.e. the 1/sigma^2 prior
loglikelihoodprior = function(y,x,beta,rho,sig){
  sum(dnorm(y[2:n],rho*y[1:(n-1)]+beta*(x[2:n]-rho*x[1:(n-1)]),sig,log=TRUE))-2*log(sig)
}
M = 200
betas = seq(1,4.5,length=M)
rhos  = seq(-2,2,length=M)
sigs  = seq(0.1,4,length=M)
post  = array(0,c(M,M,M))
post2 = matrix(0,M,M)
for (i in 1:M)
  for (j in 1:M)
    for (k in 1:M)
      post[i,j,k] = loglikelihoodprior(y1,x,betas[i],rhos[j],sigs[k])
maxlog = max(post)   # subtract the maximum before exponentiating, for numerical stability
for (i in 1:M)
  for (j in 1:M)
    post2[i,j] = sum(exp(post[i,j,]-maxlog))

contour(betas,rhos,post2,xlab=expression(beta),ylab=expression(rho),nlevels=20,
        xlim=c(1,4),ylim=c(-1,2),drawlabels=FALSE)
```
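
A quick numerical companion to the contour plot: the grid point with the largest marginalized posterior value gives a rough joint mode for $(\beta,\rho)$, with precision limited by the grid resolution.

```{r}
# Rough joint posterior mode of (beta, rho): grid point with the largest marginal posterior value
ij = which(post2 == max(post2), arr.ind = TRUE)
c(beta = betas[ij[1,1]], rho = rhos[ij[1,2]])
```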

# SIR-based posterior inference for the case I dataset ($\rho=0.5$)

```{r fig.width=10, fig.height=8}
M = 100000
# Proposal draws: independent uniforms for (beta, rho, sigma)
beta0 = runif(M,-5,5)
rho0 = runif(M,-3,3)
sig0 = runif(M,0.01,4)
# Unnormalized log-posterior at each proposal draw; with a uniform proposal the
# importance weights are proportional to its exponential
w = rep(0,M)
for (i in 1:M)
  w[i] = loglikelihoodprior(y1,x,beta0[i],rho0[i],sig0[i])
w = exp(w-max(w))
# Resampling step: draw indices with replacement, with probabilities proportional to the weights
k = sample(1:M,size=M,replace=TRUE,prob=w)
beta = beta0[k]
rho  = rho0[k]
sig  = sig0[k]

draws.sir = cbind(beta,rho,sig)

par(mfrow=c(2,2))
hist(beta,main=expression(beta),ylab="Posterior",xlab="",prob=TRUE)
legend("topright",legend=c(
paste("5th perc.=",round(quantile(beta,0.05),2),sep=""),
paste("50th perc.=",round(quantile(beta,0.5),2),sep=""),
paste("95th perc.=",round(quantile(beta,0.95),2),sep="")),bty="n")                          
abline(v=3,lwd=2,col=2)
hist(rho,main=expression(rho),ylab="Posterior",xlab="",prob=TRUE)
legend("topright",legend=c(
paste("5th perc.=",round(quantile(rho,0.05),2),sep=""),
paste("50th perc.=",round(quantile(rho,0.5),2),sep=""),
paste("95th perc.=",round(quantile(rho,0.95),2),sep="")),bty="n")                          
hist(sig,main=expression(sig),ylab="Posterior",xlab="",prob=TRUE)
legend("topright",legend=c(
paste("5th perc.=",round(quantile(sig,0.05),2),sep=""),
paste("50th perc.=",round(quantile(sig,0.5),2),sep=""),
paste("95th perc.=",round(quantile(sig,0.95),2),sep="")),bty="n")
abline(v=1,lwd=2,col=2)

plot(beta,rho,xlab=expression(beta),ylab=expression(rho))
contour(betas,rhos,post2,add=TRUE,col=3,nlevels=20,drawlabels=FALSE)
```
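
Two simple diagnostics for the quality of the SIR approximation: the effective sample size implied by the importance weights and the number of distinct draws that survive the resampling step.

```{r}
# Effective sample size of the importance weights and number of distinct resampled draws
c(ess = sum(w)^2/sum(w^2), distinct.draws = length(unique(k)))
```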

# Gibbs sampler

We need to derive the full conditional distributions in order to implement the Gibbs sampler.

## $p(\sigma^2|\beta,\rho,x_{2:n},y_{2:n})$

Conditional on $(\beta,\rho)$, the full conditional distribution of $\sigma^2$ is inverse-gamma with parameters $(n-1)/2$ and $\sum_{t=2}^n (y_t-\rho y_{t-1}-\beta x_t+\rho\beta x_{t-1})^2/2$:
$$
p(\sigma^2|\beta,\rho,x_{2:n},y_{2:n}) \propto (\sigma^2)^{-\left(\frac{n-1}{2}+1\right)}
\exp\left\{-\frac{1}{2\sigma^2} \sum_{t=2}^n (y_t-\rho y_{t-1}-\beta x_t+\rho\beta x_{t-1})^2\right\}
$$
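
In R, a draw from this inverse-gamma can be obtained by drawing the precision from a gamma distribution and inverting it; a minimal sketch with $(\beta,\rho)$ held at the true case I values purely for illustration (the same step is used inside the Gibbs sampler below):

```{r}
# One inverse-gamma draw for sigma^2 given (beta, rho) fixed at the true case I values (illustration only)
b = 3; r = 0.5
z = y1[2:n] - r*y1[1:(n-1)] - b*(x[2:n] - r*x[1:(n-1)])
1/rgamma(1, shape = (n-1)/2, rate = sum(z^2)/2)
```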

## $p(\beta|\rho,\sigma^2,x_{2:n},y_{2:n})$
Conditional on $(\rho,\sigma^2)$, the full conditional distribution of $\beta$ takes advantage of the following regression setup:
$$
\underbrace{y_t - \rho y_{t-1}}_{{\tilde y}_t} = \beta (\underbrace{x_t - \rho x_{t-1}}_{{\tilde x}_t}) + \epsilon_t,
$$
such that
$$
(\beta|\rho,\sigma^2,x_{2:n},y_{2:n}) \sim N\left(
\frac{\sum_{t=2}^n (y_t-\rho y_{t-1})(x_t-\rho x_{t-1})}
{\sum_{t=2}^n (x_t-\rho x_{t-1})^2};\frac{\sigma^2}{\sum_{t=2}^n (x_t-\rho x_{t-1})^2}
\right).
$$

## $p(\rho|\beta,\sigma^2,x_{2:n},y_{2:n})$
Conditional on $(\beta,\sigma^2)$, the full conditional distribution of $\rho$ takes advantage of the following regression setup:
$$
\underbrace{y_t - \beta x_t}_{{\tilde y}_t} = \rho (\underbrace{y_{t-1} - \beta x_{t-1}}_{{\tilde x}_t}) + \epsilon_t,
$$
such that
$$
(\rho|\beta,\sigma^2,x_{2:n},y_{2:n}) \sim N\left(
\frac{\sum_{t=2}^n (y_t-\beta x_t)(y_{t-1}-\beta x_{t-1})}
{\sum_{t=2}^n (y_{t-1}-\beta x_{t-1})^2};\frac{\sigma^2}{\sum_{t=2}^n (y_{t-1}-\beta x_{t-1})^2}\right).
$$
```{r fig.width=10, fig.height=8}
y = y1            # case I dataset

# Initial values, set at the true parameter values (beta=3, rho=0.5, sigma2=1)
sig2 = 1
beta = 3
rho  = 0.5

niter = 10000
draws = matrix(0,niter,3)

for (iter in 1:niter){
  # Full conditional of sigma2: inverse-gamma with shape (n-1)/2 and rate sum(z^2)/2
  z    = y[2:n]-rho*y[1:(n-1)]-beta*x[2:n]+rho*beta*x[1:(n-1)]
  sig2 = 1/rgamma(1,(n-1)/2,sum(z^2)/2)

  # Full conditional of beta: no-intercept regression of (y_t - rho*y_{t-1}) on (x_t - rho*x_{t-1})
  yt   = y[2:n]-rho*y[1:(n-1)]
  xt   = x[2:n]-rho*x[1:(n-1)]
  beta = rnorm(1,sum(yt*xt)/sum(xt^2),sqrt(sig2/sum(xt^2)))

  # Full conditional of rho: no-intercept regression of (y_t - beta*x_t) on (y_{t-1} - beta*x_{t-1})
  yt  = y[2:n]-beta*x[2:n]
  xt  = y[1:(n-1)]-beta*x[1:(n-1)]
  rho = rnorm(1,sum(yt*xt)/sum(xt^2),sqrt(sig2/sum(xt^2)))

  # Store (beta, rho, sigma)
  draws[iter,] = c(beta,rho,sqrt(sig2))
}

draws.gibbs = draws

par(mfrow=c(2,3))
ts.plot(draws[,1],xlab="Iterations",ylab="",main=expression(beta))
ts.plot(draws[,2],xlab="Iterations",ylab="",main=expression(rho))
ts.plot(draws[,3],xlab="Iterations",ylab="",main=expression(sigma))
hist(draws[,1],prob=TRUE,xlab="",main="")
hist(draws[,2],prob=TRUE,xlab="",main="")
hist(draws[,3],prob=TRUE,xlab="",main="")
```
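
A rough check on the mixing of the chains: the autocorrelation functions of the Gibbs draws.

```{r fig.width=10, fig.height=4}
# Autocorrelation of the Gibbs chains as a simple mixing diagnostic
par(mfrow=c(1,3))
acf(draws.gibbs[,1], main=expression(beta))
acf(draws.gibbs[,2], main=expression(rho))
acf(draws.gibbs[,3], main=expression(sigma))
```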


# Comparing SIR and Gibbs sampler

```{r fig.width=10, fig.height=8}
par(mfrow=c(2,2))
hist(draws.sir[,1],prob=TRUE,xlab="",main=expression(beta))
lines(density(draws.gibbs[,1]),col=2)
abline(v=3,col=3,lwd=3)
legend("topright",legend=c("SIR","GIBBS"),col=c(1,2),lwd=3)
hist(draws.sir[,2],prob=TRUE,xlab="",main=expression(rho))
lines(density(draws.gibbs[,2]),col=2)
abline(v=0.5,col=3,lwd=3)
hist(draws.sir[,3],prob=TRUE,xlab="",main=expression(sigma))
lines(density(draws.gibbs[,3]),col=2)
abline(v=1,col=3,lwd=3)
plot(draws.sir[,1],draws.sir[,2],xlab=expression(beta),ylab=expression(rho))
points(draws.gibbs[,1],draws.gibbs[,2],col=2)
```
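
To complement the plots, the posterior 5th, 50th and 95th percentiles under the two samplers can be put side by side; close agreement is expected if both approximations are adequate.

```{r}
# Posterior quantiles for (beta, rho, sigma) under SIR and under Gibbs
q = c(0.05, 0.5, 0.95)
sir.q   = apply(draws.sir,   2, quantile, probs = q)
gibbs.q = apply(draws.gibbs, 2, quantile, probs = q)
colnames(sir.q) = colnames(gibbs.q) = c("beta","rho","sigma")
list(SIR = round(sir.q, 3), Gibbs = round(gibbs.q, 3))
```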
