#################################################################################
#
#  Unit 5 - Example i.
#
#  Several methods to compute the normalizing constant f(y)
#
#################################################################################
#
#  Author : Hedibert Freitas Lopes
#           Graduate School of Business
#           University of Chicago
#           5807 South Woodlawn Avenue
#           Chicago, Illinois, 60637
#  Email  : hlopes@ChicagoGSB.edu
#
#################################################################################

# Cauchy prior density (Student-t with 1 df) located at theta0, scale tau02.
# NOTE(review): the change-of-variable Jacobian divides by tau02 rather than
# sqrt(tau02); with tau02 = 1 (set below) the two coincide — confirm intent
# before reusing this with tau02 != 1.
prior <- function(theta) {
  dt((theta - theta0) / sqrt(tau02), 1) / tau02
}

# Normal likelihood of theta given summary statistics xbar and sig2n.
like <- function(theta) {
  dnorm(theta, xbar, sqrt(sig2n))
}

# Unnormalized posterior: prior times likelihood.
post <- function(theta) {
  prior(theta) * like(theta)
}

# Normal approximation to the posterior; th.hat (mode) and V (variance)
# are computed further below before g() is ever called.
g <- function(theta) {
  dnorm(theta, th.hat, sqrt(V))
}

# Auxiliary weight function used by bridge-sampling-type estimators.
alpha <- function(theta) {
  1 / sqrt(g(theta) * post(theta))
}

# Path endpoints (prior -> unnormalized posterior) and geometric bridge;
# gi() reads a vector `lambda` that must be defined before use.
g0 <- function(theta) prior(theta)
gk <- function(theta) post(theta)
gi <- function(theta, i) g0(theta)^(1 - lambda[i]) * gk(theta)^(lambda[i])

# Situation of example 2.1 and 2.3 of Gamerman and Lopes (2006)
# -------------------------------------------------------------
theta0 <- 0.0
tau02  <- 1.0
xbar   <- 7.0
sig2n  <- 4.5
theta  <- seq(-15, 15, length = 1000)  # plotting grid

# Riemann integration: fine-grid quadrature gives the "true" f(y)
# ---------------------------------------------------------------
thetas <- seq(-15, 15, length = 10000000)
f.true <- (thetas[2] - thetas[1]) * sum(post(thetas))

# Finding the mode of the posterior density (minimize -log posterior)
# -------------------------------------------------------------------
func   <- function(theta) -log(post(theta))
th.hat <- nlm(func, 0.05)$estimate

# Sampling from the posterior using a SIR algorithm
# -------------------------------------------------
set.seed(192477)
M   <- 100000
th  <- rnorm(M, th.hat, 2)              # proposal draws
w   <- post(th) / dnorm(th, th.hat, 2)  # importance weights
w   <- w / sum(w)
ind <- sample(seq_len(M), size = M, replace = TRUE, prob = w)
th1 <- th[ind]                          # approximate posterior sample
V   <- var(th1)                         # Variance of the normal approximation

# f0: Normal approximation estimator
# ----------------------------------
f0 <- post(th.hat) * sqrt(2 * pi) * sqrt(V)

# f1: simple Monte Carlo estimator (likelihood averaged over prior draws)
# -----------------------------------------------------------------------
set.seed(192477)
th <- theta0 + sqrt(tau02) * rt(M, 1)
f1 <- mean(like(th))

# f4: harmonic
# NOTE(review): this remainder of the script arrived CORRUPTED — a span of
# source text is missing. The f5 (Newton and Raftery) loop body breaks off
# mid-expression at `M1 = sum(u` and resumes inside a DIFFERENT loop at
# `1]=1 accept2 = ...`. Gone entirely are: the rest of the f5 loop, the
# definitions of estimators f6, f7, f8, the grid `phis`, the initialization
# `f11s = NULL`, the loop header over `phi in phis`, and the draws `th4` —
# all of which are referenced by the surviving text (e.g. Table 7.2 uses
# f6, f7, f8 and the plots use phis). Do NOT attempt to run this as-is;
# recover the missing section from the original companion code of
# Gamerman and Lopes (2006), Chapter 7. The surviving pieces are:
#   f4  : harmonic mean estimator, 1/mean(1/like(th1)) over the SIR sample
#   f5  : Newton-Raftery estimator, looped over deltas = 0.01,...,0.10
#   f11 : Chib-Jeliazkov-style estimator at fixed points phi (loop truncated)
#   Figure 7.1 panels (a)-(d) and Table 7.2 assembly at the end.
# Original line kept byte-identical below.
mean estimator # --------------------------- f4 = 1/mean(1/like(th1)) # f5: Newton and Raftery # ---------------------- set.seed(192477) deltas = seq(0.01,0.1,by=0.01) th = theta0+sqrt(tau02)*rt(M,1) u = runif(M) f5s = NULL for (delta in deltas){ M1 = sum(u1]=1 accept2 = post(th4)/post(phi)*dnorm(phi,th4,sqrt(V))/dnorm(th4,phi,sqrt(V)) accept1[accept2>1]=1 pphi = mean(accept1*dnorm(phi,th1,sqrt(V)))/mean(accept2) f11 = like(phi)*prior(phi)/pphi f11s = c(f11s,f11) } # Figure 7.1 (from Gamerman and Lopes, 2006): prior, likelihood, posterior and normal approximation # --------------------------------------------------------------------------------------------------- par(mfrow=c(2,2)) breaks = seq(min(th1),max(th1),length=50) plot(theta,prior(theta),xlab="",ylab="",main="(a)",ylim=c(0,0.32),type="l",axes=F) axis(1) axis(2) lines(theta,like(theta),lty=2) lines(theta,dnorm(theta,th.hat,sqrt(V)),lty=3) hist(th1,xlab="",ylab="",main="(b)",breaks=breaks,prob=T) lines(theta,post(theta)/f.true) plot(deltas,10000*f5s,type="b",xlab=expression(delta),ylab="10000 f(y)",main="(c)",axes=F,ylim=10000*range(f5s,f11s)) axis(1,at=deltas) axis(2) abline(h=10000*f.true) plot(phis,10000*f11s,type="b",xlab=expression(phi),ylab="10000 f(y)",main="(d)",axes=F,ylim=10000*range(f5s,f11s)) axis(1) axis(2) abline(h=10000*f.true) points(phis[7],f11s[7],pch=16) points(phis[14],f11s[14],pch=15) # Table 7.2 (from Gamerman and Lopes, 2006) # ------------------------------------------ fff = round(c(f0,f1,f4,f5s[10],f6,f7,f8,f11s[14]),8) error = round(100*abs(fff-f.true)/f.true,2) cbind(fff,error)