1 One model and four priors

A particular physical quantity \(\theta\) can be assessed via noisy measurements modeled as conditionally independent draws \[ x_1,\ldots,x_n|\theta \sim N(\theta,40^2), \] for some \(n \geq 1\). Four physicists describe their degrees of knowledge and skepticism regarding \(\theta\) through the following priors:

Physicist A: \(\theta \sim N(900,400)\)
Physicist B: \(\theta \sim N(800,6400)\)
Physicist C: \(\theta \sim t_5(900,240)\)
Physicist D: \(\theta \sim t_5(800,3840)\)

Here \(N(m,v)\) is parameterized by mean and variance, and \(t_5(m,s^2)\) denotes a Student-t distribution with 5 degrees of freedom, location \(m\) and squared scale \(s^2\). Priors C and D have the same means and variances as A and B (a \(t_5\) with squared scale \(s^2\) has variance \(5s^2/3\)), but heavier tails.

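Under the two normal priors the posterior is available in closed form, which gives a useful benchmark for the Monte Carlo approximation used below. Writing a generic normal prior as \(\theta \sim N(m_0,v_0)\) (notation introduced here, not in the original notes), standard conjugate updating gives \[ \theta \mid x_1,\ldots,x_n \sim N\left(\frac{m_0/v_0 + n\bar{x}_n/\sigma^2}{1/v_0 + n/\sigma^2},\; \frac{1}{1/v_0 + n/\sigma^2}\right), \qquad \sigma = 40. \] No such closed form exists for the Student-t priors, which is one motivation for the sampling-based approach of the next section.
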
2 Posterior inference via SIR Monte Carlo

Posterior inference is carried out by sampling importance resampling (SIR): candidate values of \(\theta\) are drawn from a proposal distribution \(q(\theta)\) (in the code below, a uniform on \((500,1100)\)) and resampled with weights proportional to likelihood times prior, the constant proposal density cancelling. Since \(\bar{x}_n\) is sufficient for \(\theta\), the likelihood is evaluated through \[ \bar{x}_n \mid \theta \sim N(\theta,\sigma^2/n), \] which, viewed as a function of \(\theta\), has the same kernel as \(N(\widehat{\theta},\widehat{\sigma}^2)\) with the maximum likelihood estimates \(\widehat{\theta} = \bar{x}_n = \sum_{i=1}^n x_i/n\) and \(\widehat{\sigma} = \sigma/\sqrt{n}\).
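
In symbols, given proposal draws \(\theta^{(1)},\ldots,\theta^{(M)} \sim q\) (the superscript notation is mine), each draw receives an unnormalized weight \[ w_i \propto \frac{p(\bar{x}_n \mid \theta^{(i)})\, p(\theta^{(i)})}{q(\theta^{(i)})}, \qquad i=1,\ldots,M, \] and the posterior sample is obtained by resampling the \(\theta^{(i)}\) with probabilities proportional to the \(w_i\). With the uniform proposal the denominator is constant, which is why the code below simply sets the log weight to llike + lprior.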

# Log-likelihood of theta based on the sufficient statistic
# xbar | theta ~ N(theta, shat^2), with shat = sigma/sqrt(n).
# By symmetry of the normal kernel, dnorm(theta,xbar,shat) = dnorm(xbar,theta,shat).
log.likelihood = function(theta,xbar,shat){
  dnorm(theta,xbar,shat,log=TRUE)
}

# Log prior density of theta for physicist k = 1,2,3,4.
# Priors 1 and 2 are normal; priors 3 and 4 are Student-t with 5 df,
# scaled so that their variances (scale^2*5/3) match priors 1 and 2.
log.prior = function(theta,k){
  if (k==1) lprior = dnorm(theta,900,20,log=TRUE)                              # N(900,20^2)
  if (k==2) lprior = dnorm(theta,800,80,log=TRUE)                              # N(800,80^2)
  if (k==3) lprior = dt((theta-900)/sqrt(240),df=5,log=TRUE)-log(sqrt(240))    # t5(900,240)
  if (k==4) lprior = dt((theta-800)/sqrt(3840),df=5,log=TRUE)-log(sqrt(3840))  # t5(800,3840)
  return(lprior)
}
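
As a quick sanity check (a small snippet of my own, assuming the functions above have been run), simulation confirms that the heavy-tailed priors C and D share the variances of priors A and B:

# Monte Carlo check (illustrative, not part of the original notes):
# a t5 with squared scale s^2 has variance 5*s^2/3
set.seed(1)
c(var.C = var(900+sqrt(240)*rt(1000000,df=5)),    # close to 20^2 = 400
  var.D = var(800+sqrt(3840)*rt(1000000,df=5)))   # close to 80^2 = 6400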

3 Let us run an exercise

# Simulate n = 50 (integer-valued) measurements around theta.true = 800
set.seed(11234)
theta.true = 800
sig        = 40
n          = 50
x          = trunc(rnorm(n,theta.true,sig))
n          = length(x)
xbar       = cumsum(x)/1:n          # running sample means, n = 1,...,50
shat       = sig/sqrt(1:n)          # standard errors of the running means
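
For reference, the running MLE and its standard error at the sample sizes plotted later can be inspected directly (a small convenience snippet, not in the original notes):

# Running MLE (xbar) and its standard error (shat) at n = 1, 2, 3 and 50
round(rbind(n=c(1,2,3,n), xbar=xbar[c(1,2,3,n)], se=shat[c(1,2,3,n)]),2)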

3.1 Sampling from the proposal and evaluating the priors at the proposal draws

# M proposal draws from a Uniform(500,1100) blanket, and the four
# log prior densities evaluated at those draws
M          = 100000
theta.prop = 500+600*runif(M)
lprior     = matrix(0,M,4)
theta.post = matrix(0,M,4)
for (j in 1:4)
  lprior[,j] = log.prior(theta.prop,j)

4 Posterior plots

# For each sample size t, resample M posterior draws for each prior via SIR;
# histograms (posterior) are overlaid with the prior density for
# t = 1, 2, 3 and n (a 4x4 grid of panels)
par(mfrow=c(4,4),mai=c(0.25,0.25,0.25,0.25))
range = c(500,1100)
thetas = seq(range[1],range[2],length=1000)
draws = array(0,c(n,M,4))
names = c("Prior A: N(900,400)",
          "Prior B: N(800,6400)",
          "Prior C: t5(900,240)",
          "Prior D: t5(800,3840)")
ind = c(1:3,n)                        # sample sizes shown in the plots
for (t in 1:n){
  llike = log.likelihood(theta.prop,xbar[t],shat[t])
  for (j in 1:4){
    w = llike+lprior[,j]              # log weight = log-likelihood + log prior
    w = exp(w-max(w))                 # exponentiate, guarding against underflow
    theta.post[,j] = sample(theta.prop,size=M,replace=TRUE,prob=w)
  }
  draws[t,,] = theta.post
  if (sum(t==ind)==1){
    for (j in 1:4){
      hist(theta.post[,j],prob=TRUE,xlim=range,xlab="",main="",ylim=c(0,0.03))
      lines(thetas,exp(log.prior(thetas,j)),lwd=2)
      points(xbar[t],0,pch=16,col=2,cex=2)   # MLE (running mean) marked in red
      if (t==1){
        legend("topleft",legend=names[j],bty="n")
      }
      if (j==1){
        legend("left",legend=paste("n=",t,sep=""),bty="n")
      }
    }
  }
}
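
A common diagnostic for importance sampling, not computed in the original exercise, is the effective sample size of the normalized weights; values far below M indicate that a few proposal draws carry most of the weight. A minimal sketch for prior A at the full sample size, reusing objects defined above:

# Effective sample size of the SIR weights for prior A at t = n
# (illustrative diagnostic only, not part of the original notes)
lw = log.likelihood(theta.prop,xbar[n],shat[n])+lprior[,1]
w  = exp(lw-max(lw))
w  = w/sum(w)
1/sum(w^2)    # compare with M = 100000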

5 Comparing Bayes and MLE

# 2.5%, 50% and 97.5% posterior percentiles for each sample size and prior
q = array(0,c(n,3,4))
for (j in 1:4)
  q[,,j] = t(apply(draws[,,j],1,quantile,c(0.025,0.5,0.975)))
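
Because prior A is conjugate, the SIR percentiles can be checked against the exact normal posterior given in Section 1. A minimal sketch at the full sample size (the variable names m.post and v.post are mine):

# Exact conjugate posterior for prior A, theta ~ N(900,20^2), at t = n
# (sanity check on the SIR output; not part of the original notes)
v.post = 1/(1/20^2+n/sig^2)                   # posterior variance
m.post = v.post*(900/20^2+n*xbar[n]/sig^2)    # posterior mean
rbind(exact = qnorm(c(0.025,0.5,0.975),m.post,sqrt(v.post)),
      SIR   = q[n,,1])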

range = c(700,950)
names = c("Physicist A: theta ~ N(900,400)",
"Physicist B: theta ~ N(800,6400)",
"Physicist C: theta ~ t5(900,240)",
"Physicist D: theta ~ t5(800,3840)")

# Posterior percentiles (Bayes) versus the sampling distribution of the
# MLE (xbar +/- 1.96*shat) as the sample size grows, for each prior
par(mfrow=c(2,2))
for (j in 1:4){
  ts.plot(q[,,j],ylim=range,lty=c(2,1,2),xlab="Observation",
          ylab="Posterior percentiles",lwd=2)
  lines(1:n,xbar,col=2,lwd=2)                          # MLE (running mean)
  lines(1:n,qnorm(0.025,xbar,shat),col=2,lwd=2,lty=2)  # 95% limits around the MLE
  lines(1:n,qnorm(0.975,xbar,shat),col=2,lwd=2,lty=2)
  abline(h=theta.true,col=3,lwd=2)                     # true value of theta
  title(names[j])
  if (j==1){
    legend("topright",legend=c("Bayes","MLE","True"),col=1:3,lty=1,lwd=2)
  }
}