# ---------------------------------------------------------------------------
# Bayesian comparison of three binary-regression links (logit, probit,
# complementary log-log) on the O-ring failure data, under three priors for
# the temperature coefficient beta. The marginal likelihood p(y) is computed
# four ways: deterministic integration over a grid, Monte Carlo with the
# prior as proposal, the harmonic mean estimator, and the generalized
# harmonic mean (Gelfand-Dey) estimator with a Gaussian proposal.
# ---------------------------------------------------------------------------

# Logit link
like1 = function(b){
  theta = 1/(1+exp(-a-b*x))
  A*prod((theta^y)*((1-theta)^(1-y)))
}

# Probit link
like2 = function(b){
  theta = pnorm(a+b*x)
  A*prod((theta^y)*((1-theta)^(1-y)))
}

# Complementary log-log link
like3 = function(b){
  theta = 1-exp(-exp(a+b*x))
  A*prod((theta^y)*((1-theta)^(1-y)))
}

# Prior density for beta (temperature coefficient); j indexes the prior sd
prior = function(b,j){dnorm(b,b0,Sb[j])}

# O-ring data: y = 1 indicates failure, t is launch temperature (F)
y = c(1,1,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0)
t = c(53,57,58,63,66,67,67,67,68,69,70,70,70,70,72,
      73,75,75,76,76,78,79,81)
n  = length(y)
ny = sum(y)
A  = gamma(n+1)/(gamma(ny+1)*gamma(n-ny+1)) # = choose(n,ny); common to all
                                            # models, so it cancels in the
                                            # posterior model probabilities
x  = t - mean(t)       # centered temperature
a  = -1.26             # regression intercept (held fixed)
b0 = 0.0               # prior mean
Sb = sqrt(c(1,10,100)) # prior sds (prior variances 1, 10, 100)
M  = 100000            # number of grid points / draws

# Computing p(y) by deterministic numerical integration: a Riemann sum
# over a fine grid of beta values
l  = array(0,c(M,3,3))
py = matrix(0,3,3)
betas = seq(-1.0,0.12,length.out=M)
h = betas[2]-betas[1]
for (j in 1:3){
  for (i in 1:M){
    l[i,1,j] = like1(betas[i])*prior(betas[i],j)
    l[i,2,j] = like2(betas[i])*prior(betas[i],j)
    l[i,3,j] = like3(betas[i])*prior(betas[i],j)
  }
  for (i in 1:3) py[i,j] = h*sum(l[,i,j])
}

# Sampling from the prior p(beta) and evaluating the likelihoods there
set.seed(1234)
l = array(0,c(M,3,3))
betas = matrix(0,M,3)
for (j in 1:3){
  betas[,j] = rnorm(M,b0,Sb[j])
  for (i in 1:M){
    l[i,1,j] = like1(betas[i,j])
    l[i,2,j] = like2(betas[i,j])
    l[i,3,j] = like3(betas[i,j])
  }
}

# Sampling from the posterior p(beta|y): SIR with q(beta) = prior,
# so the importance weights are proportional to the likelihood
w = array(0,c(M,3,3))
betas1 = array(0,c(M,3,3))
for (i in 1:3){
  for (j in 1:3){
    w[,i,j] = l[,i,j]/sum(l[,i,j])
    betas1[,i,j] = sample(betas[,j],size=M,replace=TRUE,prob=w[,i,j])
  }
}
# Pooled posterior mean and sd (over all nine link/prior combinations),
# used below as a common Gaussian proposal
mb  = mean(betas1)
sdb = sd(betas1)

# Computing p(y) by the Monte Carlo estimator with the prior as proposal:
# p(y) is estimated by the average likelihood over prior draws
py1 = matrix(0,3,3)
for (j in 1:3){
  py1[1,j] = mean(l[,1,j])
  py1[2,j] = mean(l[,2,j])
  py1[3,j] = mean(l[,3,j])
}

# Computing p(y) by the harmonic mean estimator:
# 1/p(y) is estimated by the average of 1/likelihood over posterior draws
l = array(0,c(M,3,3))
for (j in 1:3){
  for (i in 1:M){
    l[i,1,j] = like1(betas1[i,1,j])
    l[i,2,j] = like2(betas1[i,2,j])
    l[i,3,j] = like3(betas1[i,3,j])
  }
}
py2 = matrix(0,3,3)
for (j in 1:3) for (i in 1:3) py2[i,j] = 1/mean(1/l[,i,j])

# Computing p(y) by the generalized harmonic mean (Gelfand-Dey) estimator
# with proposal q(beta) = N(mb,sdb):
# 1/p(y) is estimated by the average of q(beta)/(likelihood*prior)
# over posterior draws
for (i in 1:3) for (j in 1:3) l[,i,j] = l[,i,j]*prior(betas1[,i,j],j)
py3 = matrix(0,3,3)
for (j in 1:3) for (i in 1:3) py3[i,j] = 1/mean(dnorm(betas1[,i,j],mb,sdb)/l[,i,j])

# Posterior model probabilities over all nine link/prior combinations
pm  = py/sum(py)
pm1 = py1/sum(py1)
pm2 = py2/sum(py2)
pm3 = py3/sum(py3)

# Marginal likelihoods
# (columns: grid, MC with prior proposal, Gelfand-Dey, harmonic mean;
#  rows: the three links within each prior, prior variance 1, 10, 100)
round(cbind(matrix(py,9,1),
            matrix(py1,9,1),
            matrix(py3,9,1),
            matrix(py2,9,1)),4)

# Posterior model probabilities (same row/column layout)
round(cbind(matrix(pm,9,1),
            matrix(pm1,9,1),
            matrix(pm3,9,1),
            matrix(pm2,9,1)),3)

# Posterior model probabilities normalized within each prior
# (each column sums to one across the three links)
pm  = py%*%diag(1/colSums(py))
pm1 = py1%*%diag(1/colSums(py1))
pm2 = py2%*%diag(1/colSums(py2))
pm3 = py3%*%diag(1/colSums(py3))
round(cbind(matrix(pm,9,1),
            matrix(pm1,9,1),
            matrix(pm3,9,1),
            matrix(pm2,9,1)),3)
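
# ---------------------------------------------------------------------------
# Optional sanity check (an addition, not part of the original analysis):
# cross-check the grid-based p(y) against R's adaptive quadrature via
# integrate(), for the logit link under the tightest prior (j = 1),
# integrating over the same interval the grid covers. The two numbers
# should agree closely if the grid is fine enough.
# ---------------------------------------------------------------------------
integrand = function(b) sapply(b, function(bb) like1(bb)*prior(bb,1))
py.check  = integrate(integrand, lower=-1.0, upper=0.12)$value
round(c(grid=py[1,1], quadrature=py.check), 6)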
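
# ---------------------------------------------------------------------------
# Optional sanity check (an addition, not part of the original analysis):
# maximum-likelihood fits via glm() for the three links. Note that glm()
# estimates the intercept, whereas the model above fixes it at a = -1.26,
# so the MLE slopes should be comparable to, but not identical with, the
# posterior means of beta.
# ---------------------------------------------------------------------------
fit.logit   = glm(y ~ x, family=binomial(link="logit"))
fit.probit  = glm(y ~ x, family=binomial(link="probit"))
fit.cloglog = glm(y ~ x, family=binomial(link="cloglog"))
round(c(logit   = unname(coef(fit.logit)[2]),
        probit  = unname(coef(fit.probit)[2]),
        cloglog = unname(coef(fit.cloglog)[2])), 3)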