This document demonstrates Bayesian analysis techniques for handling missing data, comparing different model specifications using both lavaan (for structural equation modeling) and R2jags (for Bayesian modeling with JAGS).

Setup

First, we load the necessary R packages.

if (!require(mvtnorm)){
  install.packages("mvtnorm")
}
## Loading required package: mvtnorm
library(mvtnorm)

haspackage = require("lavaan")
## Loading required package: lavaan
## This is lavaan 0.6-19
## lavaan is FREE software! Please report any bugs.
if (haspackage==FALSE){
  install.packages("lavaan")
}
library(lavaan)

haspackage = require("R2jags")
## Loading required package: R2jags
## Loading required package: rjags
## Loading required package: coda
## Linked to JAGS 4.3.2
## Loaded modules: basemod,bugs
## 
## Attaching package: 'R2jags'
## The following object is masked from 'package:coda':
## 
##     traceplot
if (haspackage==FALSE){
  install.packages("R2jags")
}
library(R2jags)

Data Simulation

We simulate a dataset with two groups (men and women) and several variables. Missingness is introduced in cc, perf, and use based on logistic models.

# Reproducible population parameters for the simulated dataset.
set.seed(20250223)

# Group sample sizes
Nmen = 121
Nwomen = 229

# Population means for the 7 continuous variables, in the order
# hsl, cc, use, msc, mas, mse, perf (matching the names given to allx below)
MeanMEN = c(5.081333347, 9.723030258, 51.94404048, 52.66452548, 34.04606078, 77.06181845, 14.75389904)
MeanWOMEN = c(4.80418631, 10.60486174, 50.34834542, 48.13359134, 30.61321679, 71.77082955, 13.75449003)

# Population correlation matrix (7 x 7, symmetric), same variable order
Corr = matrix(c(
  1.0,  .15, .12, .48, .44, .47, .44,
  .15, 1.0, .06, .25, .20, .23, .23,
  .12, .06, 1.0, .40, .32, .19, .14,
  .48, .25, .40, 1.0, .87, .61, .54,
  .44, .20, .32, .87, 1.0, .56, .51,
  .47, .23, .19, .61, .56, 1.0, .70,
  .44, .23, .14, .54, .51, .70, 1.0
), nrow = 7, byrow = TRUE)

# Population standard deviations, same variable order
SD = c(1.2, 6.0, 15.2, 16.6, 10.9, 10.5, 2.8)
SDdiag = diag(SD)

# Convert correlations to covariances: Cov = D %*% Corr %*% D
Cov = SDdiag %*% Corr %*% SDdiag

# Draw multivariate-normal data for each group and prepend the group
# indicator (female: 0 = men, 1 = women) as the first column.
xmen <- cbind(0, rmvnorm(Nmen, MeanMEN, Cov))
xwomen <- cbind(1, rmvnorm(Nwomen, MeanWOMEN, Cov))

# Stack the two groups into a single data frame and label the columns.
allx <- as.data.frame(rbind(xmen, xwomen))
names(allx) <- c("female", "hsl", "cc", "use", "msc", "mas", "mse", "perf")

# Missingness in CC: MAR-style mechanism -- the probability that cc is
# missing depends on female, msc, mas, and mse through a logistic model.
M_CC_intercept = -3
M_CC_female = 1
M_CC_msc = .01
M_CC_mas = .01
M_CC_mse = .01

missingCCmodelLogit = 
  M_CC_intercept +
  M_CC_female*allx[,"female"] +
  M_CC_msc*allx[,"msc"] +
  M_CC_mas*allx[,"mas"] +
  M_CC_mse*allx[,"mse"]

# Inverse logit via stats::plogis(), the standard-library equivalent of
# the hand-rolled exp(x)/(1 + exp(x)) (and numerically safer for large x).
missingCCmodelProb = plogis(missingCCmodelLogit)
# Each case goes missing with its own probability; logical indexing
# makes the intermediate which() unnecessary.
makeMissingCC = runif(Nmen+Nwomen) < missingCCmodelProb
allx$cc[makeMissingCC] = NA

# Missingness in PERF: same MAR-style logistic mechanism as for cc.
M_perf_intercept = -3
M_perf_female = .5
M_perf_msc = .001
M_perf_mas = .02
M_perf_mse = .01

missingPERFmodelLogit = 
  M_perf_intercept +
  M_perf_female*allx[,"female"] +
  M_perf_msc*allx[,"msc"] +
  M_perf_mas*allx[,"mas"] +
  M_perf_mse*allx[,"mse"]

# Inverse logit via stats::plogis() instead of hand-rolled exp(x)/(1+exp(x))
missingPERFmodelProb = plogis(missingPERFmodelLogit)
makeMissingPerf = runif(Nmen+Nwomen) < missingPERFmodelProb
allx$perf[makeMissingPerf] = NA

# Missingness in USE: same MAR-style logistic mechanism as for cc/perf.
M_use_intercept = -3
M_use_female = .5
M_use_msc = .001
M_use_mas = .02
M_use_mse = .01

missingUSEmodelLogit = 
  M_use_intercept +
  M_use_female*allx[,"female"] +
  M_use_msc*allx[,"msc"] +
  M_use_mas*allx[,"mas"] +
  M_use_mse*allx[,"mse"]

# Inverse logit via stats::plogis() instead of hand-rolled exp(x)/(1+exp(x))
missingUSEmodelProb = plogis(missingUSEmodelLogit)
makeMissingUse = runif(Nmen+Nwomen) < missingUSEmodelProb
allx$use[makeMissingUse] = NA

# Working copy of the simulated data (now containing missing values)
data02 = as.data.frame(allx)

# Center CC
# Visualize the distribution of cc before choosing a centering constant
par(mfrow = c(1,2))
boxplot(data02$cc, main="Boxplot of College Experience (CC)")
hist(data02$cc, main = "Histogram of College Experience (CC)", xlab = "CC")

# Restore single-panel plotting
par(mfrow = c(1,1))

# Center cc at the constant 10 (not the sample mean), so intercepts
# refer to a student with cc = 10
data02$cc10 = data02$cc - 10

# Interaction term (product of the group indicator and centered cc)
data02$femXcc10 = data02$female*data02$cc10

# Analysis variables used by the models below
newData = data02[c("perf", "use", "female", "cc10")]

# Check for observations missing on all variables -- such cases carry no
# information for the likelihood. (female and cc10 are complete at this
# point, so none are expected; the check prints integer(0).)
allNA = apply(newData, 1, function(x) all(is.na(x)))
which(allNA)
## integer(0)

Model 1: Empty Model

Model Description This model examines the basic relationships between perf and use without any predictors. In the lavaan specification, perf and use are predicted by their intercepts, and their variances and covariance are estimated. The JAGS model model01a.jags.syntax mirrors this, estimating the means and the covariance matrix of perf and use.

lavaan Specification

# Saturated ("empty") model: intercepts, variances, and the covariance of
# perf and use; no predictors.
model01.lavaan.syntax = "

#Means:
perf ~ 1
use  ~ 1

#Variances:
perf ~~ perf
use  ~~ use

#Covariance:
perf ~~ use

"
# Robust ML with Mplus-style defaults; cases missing on perf/use are kept
# (the output reports 3 missing-data patterns), only fully empty cases drop
model01.lavaan.fit = lavaan(model01.lavaan.syntax, data=data02, estimator = "MLR", mimic = "MPLUS")
## Warning: lavaan->lav_data_full():  
##    some cases are empty and will be ignored: 61 85 125 140 155 157 210 211 
##    213 272 299 346.
# Full output: fit indices plus standardized parameter estimates
summary(model01.lavaan.fit, standardized=TRUE, fit.measures=TRUE)
## lavaan 0.6-19 ended normally after 29 iterations
## 
##   Estimator                                         ML
##   Optimization method                           NLMINB
##   Number of model parameters                         5
## 
##                                                   Used       Total
##   Number of observations                           338         350
##   Number of missing patterns                         3            
## 
## Model Test User Model:
##                                               Standard      Scaled
##   Test Statistic                                 0.000       0.000
##   Degrees of freedom                                 0           0
## 
## Model Test Baseline Model:
## 
##   Test statistic                                10.038      12.196
##   Degrees of freedom                                 1           1
##   P-value                                        0.002       0.000
##   Scaling correction factor                                  0.823
## 
## User Model versus Baseline Model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.000       1.000
##                                                                   
##   Robust Comparative Fit Index (CFI)                         1.000
##   Robust Tucker-Lewis Index (TLI)                            1.000
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)              -1829.062   -1829.062
##   Loglikelihood unrestricted model (H1)      -1829.062   -1829.062
##                                                                   
##   Akaike (AIC)                                3668.125    3668.125
##   Bayesian (BIC)                              3687.240    3687.240
##   Sample-size adjusted Bayesian (SABIC)       3671.379    3671.379
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000          NA
##   90 Percent confidence interval - lower         0.000          NA
##   90 Percent confidence interval - upper         0.000          NA
##   P-value H_0: RMSEA <= 0.050                       NA          NA
##   P-value H_0: RMSEA >= 0.080                       NA          NA
##                                                                   
##   Robust RMSEA                                               0.000
##   90 Percent confidence interval - lower                     0.000
##   90 Percent confidence interval - upper                     0.000
##   P-value H_0: Robust RMSEA <= 0.050                            NA
##   P-value H_0: Robust RMSEA >= 0.080                            NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.000       0.000
## 
## Parameter Estimates:
## 
##   Standard errors                             Sandwich
##   Information bread                           Observed
##   Observed information based on                Hessian
## 
## Covariances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   perf ~~                                                               
##     use               9.908    2.776    3.569    0.000    9.908    0.209
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     perf             13.837    0.173   79.878    0.000   13.837    4.838
##     use              50.802    0.999   50.834    0.000   50.802    3.062
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     perf              8.179    0.677   12.077    0.000    8.179    1.000
##     use             275.265   24.557   11.209    0.000  275.265    1.000

R2jags Specification (model01a)

# JAGS analog of the saturated model: bivariate normal for (perf, use).
# Written as an R function; R2jags deparses the body into model syntax.
model01a.jags.syntax <- function(){

    # Priors for means (diffuse: precision 0.001 => variance 1000)
    mu.perf ~ dnorm(0, 0.001)
    mu.use ~ dnorm(0, 0.001)

    # Prior for inverse covariance matrix (precision matrix);
    # R (scale matrix) and df are supplied in the data list
    Omega[1:2, 1:2] ~ dwish(R, df)
    Sigma <- inverse(Omega) # Covariance matrix

    # Likelihood: each row of Y is bivariate normal.
    # (JAGS declarations are order-free, so mu may be defined below.)
    for (i in 1:n) {
      Y[i, 1:2] ~ dmnorm(mu, Omega[,])
    }

    # Means vector: element 1 is perf, element 2 is use --
    # Y's columns must be supplied in that same (perf, use) order
    mu[1] <- mu.perf
    mu[2] <- mu.use

    # Derived parameters (variances and covariance)
    variance.perf <- Sigma[1, 1]
    variance.use <- Sigma[2, 2]
    covariance <- Sigma[1, 2]
    correlation <- covariance / (sqrt(variance.perf) * sqrt(variance.use))
}

# Data list for model01a. Y's columns are ordered (perf, use) to match
# mu[1] <- mu.perf / mu[2] <- mu.use in the model. Note Y still contains
# missing values here, which is why this model fails (see text below).
model01a.jags.data = list(
  Y = data02[c("perf","use")],
  n = nrow(data02),
  df = 2,
  R = diag(2)
) 
  
# Parameters to monitor (means, variances, covariance, correlation)
model01a.jags.params = c("mu.perf", "mu.use", "variance.perf", "variance.use", "covariance", "correlation")
# 
# model01.jags.samples = jags(
#   data = model01a.jags.data,
#   parameters.to.save = model01a.jags.params,
#   model.file = model01a.jags.syntax,
#   n.chains = 5,
#   n.iter = 6000,
#   n.burnin = 1000,
#   n.thin = 1,
# )

R2jags Specification (model01b)

model01a.jags.syntax fails because JAGS cannot sample partially observed multivariate nodes: if any element of a row Y[i, 1:2] is missing while the other is observed, the dmnorm likelihood cannot be evaluated. model01b.jags.syntax sidesteps this by using complete cases only (listwise deletion), which is valid only under MCAR and discards the partially observed rows.

# Complete-case (listwise-deleted) data for the bivariate model.
# BUG FIX: columns must be selected in (perf, use) order, because
# model01a.jags.syntax maps mu[1] <- mu.perf and mu[2] <- mu.use.
# The original c("use", "perf") put use in column 1, silently swapping
# the labels on every estimate (the printed mu.perf ~ 50.7 was actually
# the mean of use; compare lavaan's perf intercept of 13.8).
data03 = data02[c("perf", "use")]
data03 = data03[complete.cases(data03),]

model01b.jags.data = list(
  Y = data03,
  n = nrow(data03),
  df = 2,
  R = diag(2)
)

# Fit the complete-case bivariate model: 4 chains, 6000 iterations each,
# first 1000 discarded as burn-in, no thinning.
# (Trailing comma after n.thin removed -- it left an empty argument in
# the call, which R tolerates here but is fragile and non-idiomatic.)
model01b.jags.samples = jags(
  data = model01b.jags.data,
  parameters.to.save = model01a.jags.params,
  model.file = model01a.jags.syntax,
  n.chains = 4,
  n.iter = 6000,
  n.burnin = 1000,
  n.thin = 1
)
## module glm loaded
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 208
##    Unobserved stochastic nodes: 3
##    Total graph size: 229
## 
## Initializing model
# Print posterior summaries, Rhat, and effective sample sizes
model01b.jags.samples
## Inference for Bugs model at "/var/folders/9l/vt76dbhs4m98bljbrl850qsm0000gn/T//Rtmp1xBP6I/model7af376f82cb8.txt", fit using jags,
##  4 chains, each with 6000 iterations (first 1000 discarded)
##  n.sims = 20000 iterations saved. Running time = 2.299 secs
##                mu.vect sd.vect     2.5%      25%      50%      75%    97.5%
## correlation      0.218   0.067    0.084    0.173    0.219    0.263    0.345
## covariance      10.910   3.612    4.098    8.430   10.819   13.284   18.247
## mu.perf         50.705   1.179   48.365   49.918   50.712   51.492   53.049
## mu.use          13.633   0.204   13.233   13.493   13.634   13.772   14.033
## variance.perf  289.345  28.787  238.219  269.389  287.746  307.328  351.031
## variance.use     8.644   0.855    7.134    8.039    8.588    9.186   10.455
## deviance      2798.920   3.187 2794.725 2796.585 2798.242 2800.554 2806.753
##                Rhat n.eff
## correlation   1.001  9200
## covariance    1.001 12000
## mu.perf       1.001 16000
## mu.use        1.001  9900
## variance.perf 1.001  9100
## variance.use  1.001  4800
## deviance      1.001 20000
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule: pV = var(deviance)/2)
## pV = 5.1 and DIC = 2804.0
## DIC is an estimate of expected predictive error (lower deviance is better).
# Trace and density plots (coda) for visual convergence checks
plot(as.mcmc(model01b.jags.samples))

lavaan Specification (model01c)

model01c.lavaan.syntax uses a factored regression approach to handle missing data: the joint distribution of perf and use is factored into a marginal model for use (use ~ 1) and a conditional regression of perf on use (perf ~ 1 + use), which together reproduce the saturated model's likelihood.

# Factored-regression specification: marginal model for use times a
# conditional model for perf given use. Statistically equivalent to the
# saturated model01 (identical loglikelihood in the output below).
model01c.lavaan.syntax = "

# exogenous mean:
use ~ 1

# exogenous variance:
use ~~ use

# regression:
perf ~ 1 + use

# residual variance:
perf ~~ perf
"

# Same robust-ML settings as model01
model01c.lavaan.fit = lavaan(model01c.lavaan.syntax, data=data02, estimator = "MLR", mimic = "MPLUS")
## Warning: lavaan->lav_data_full():  
##    some cases are empty and will be ignored: 61 85 125 140 155 157 210 211 
##    213 272 299 346.
# Full output: fit indices plus standardized parameter estimates
summary(model01c.lavaan.fit, standardized=TRUE, fit.measures=TRUE)
## lavaan 0.6-19 ended normally after 16 iterations
## 
##   Estimator                                         ML
##   Optimization method                           NLMINB
##   Number of model parameters                         5
## 
##                                                   Used       Total
##   Number of observations                           338         350
##   Number of missing patterns                         3            
## 
## Model Test User Model:
##                                               Standard      Scaled
##   Test Statistic                                 0.000       0.000
##   Degrees of freedom                                 0           0
## 
## Model Test Baseline Model:
## 
##   Test statistic                                10.038      12.196
##   Degrees of freedom                                 1           1
##   P-value                                        0.002       0.000
##   Scaling correction factor                                  0.823
## 
## User Model versus Baseline Model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.000       1.000
##                                                                   
##   Robust Comparative Fit Index (CFI)                         1.000
##   Robust Tucker-Lewis Index (TLI)                            1.000
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)              -1829.062   -1829.062
##   Loglikelihood unrestricted model (H1)      -1829.062   -1829.062
##                                                                   
##   Akaike (AIC)                                3668.125    3668.125
##   Bayesian (BIC)                              3687.240    3687.240
##   Sample-size adjusted Bayesian (SABIC)       3671.379    3671.379
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000          NA
##   90 Percent confidence interval - lower         0.000          NA
##   90 Percent confidence interval - upper         0.000          NA
##   P-value H_0: RMSEA <= 0.050                       NA          NA
##   P-value H_0: RMSEA >= 0.080                       NA          NA
##                                                                   
##   Robust RMSEA                                               0.000
##   90 Percent confidence interval - lower                     0.000
##   90 Percent confidence interval - upper                     0.000
##   P-value H_0: Robust RMSEA <= 0.050                            NA
##   P-value H_0: Robust RMSEA >= 0.080                            NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.000       0.000
## 
## Parameter Estimates:
## 
##   Standard errors                             Sandwich
##   Information bread                           Observed
##   Observed information based on                Hessian
## 
## Regressions:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   perf ~                                                                
##     use               0.036    0.010    3.648    0.000    0.036    0.209
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     use              50.802    0.999   50.834    0.000   50.802    3.062
##    .perf             12.008    0.527   22.766    0.000   12.008    4.199
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     use             275.265   24.557   11.209    0.000  275.265    1.000
##    .perf              7.822    0.670   11.682    0.000    7.822    0.956

R2jags Specification (model01c)

model01c.jags.syntax mirrors the factored regression approach of model01c.lavaan.syntax in JAGS.

# JAGS version of the factored regression: f(use) * f(perf | use).
# Because use and perf each appear on the left of a univariate stochastic
# relation, JAGS imputes their missing values (no listwise deletion).
model01c.jags.syntax <- function(){
  
  # model for use: -----------------
  # marginal (intercept-only) normal model for use
  
  for (obs in 1:N){
    # use mean model: 
    muUse[obs] <- use.intercept
    
    # use model data distribution
    use[obs] ~ dnorm(muUse[obs], tauUse)  
  }
  
  # use prior distributions (hyperparameters supplied via the data list)
  use.intercept ~ dnorm(use.intercept.mean0, use.intercept.precision0)
  tauUse ~ dgamma(use.tau.shape0, use.tau.rate0)
  
  # use variance (precision converted to variance)
  sigma2Use <- 1/tauUse
  
  # model for perf: -----------------
  # conditional model for perf given use; imputed use values feed into
  # this regression automatically
  
  # perf model data distribution
  for (obs in 1:N){
    # perf mean model
    
    muPerf[obs] <- perf.intercept + perf.use*use[obs]
    perf[obs] ~ dnorm(muPerf[obs], tauPerf)    
  }
  

  # perf prior distributions (hyperparameters supplied via the data list)
  perf.intercept ~ dnorm(perf.intercept.mean0, perf.intercept.precision0)
  perf.use ~ dnorm(perf.use.mean0, perf.use.precision0)
  tauPerf ~ dgamma(perf.tau.shape0, perf.tau.rate0)
  
  # perf residual variance
  sigma2Perf <- 1/tauPerf
  
  
}

# Data list: variables plus prior hyperparameters. Diffuse choices:
# N(0, variance 1000) for intercepts/slopes, Gamma(1, 1) for precisions.
model01c.jags.data = list(
  use = data02$use,
  perf = data02$perf,
  N = nrow(data02),
  use.intercept.mean0 = 0,
  use.intercept.precision0 = 1/1000,
  use.tau.shape0 = 1,
  use.tau.rate0 = 1,
  perf.intercept.mean0 = 0,
  perf.intercept.precision0 = 1/1000,
  perf.use.mean0 = 0,
  perf.use.precision0 = 1/1000,
  perf.tau.shape0 = 1,
  perf.tau.rate0 = 1
) 

# Monitored parameters; perf[3] and use[8] track two individual data
# nodes (presumably missing cases imputed by JAGS for this seed --
# verify against the missingness pattern before interpreting)
model01c.jags.params = c(
  "use.intercept", 
  "sigma2Use", 
  "perf.intercept", 
  "perf.use", 
  "sigma2Perf",
  "perf[3]",
  "use[8]"
)

# Fit the factored-regression model; missing perf/use values are sampled
# as unobserved stochastic nodes (see graph information below)
model01c.jags.samples = jags(
  data = model01c.jags.data,
  parameters.to.save = model01c.jags.params,
  model.file = model01c.jags.syntax,
  n.chains = 4,
  n.iter = 6000,
  n.burnin = 1000,
  n.thin = 1
)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 546
##    Unobserved stochastic nodes: 159
##    Total graph size: 1419
## 
## Initializing model
# Print posterior summaries, Rhat, and effective sample sizes
model01c.jags.samples
## Inference for Bugs model at "/var/folders/9l/vt76dbhs4m98bljbrl850qsm0000gn/T//Rtmp1xBP6I/model7af3984ac3.txt", fit using jags,
##  4 chains, each with 6000 iterations (first 1000 discarded)
##  n.sims = 20000 iterations saved. Running time = 0.907 secs
##                 mu.vect sd.vect     2.5%      25%      50%      75%    97.5%
## perf.intercept   11.999   0.587   10.879   11.589   11.996   12.402   13.160
## perf.use          0.036   0.011    0.014    0.029    0.036    0.044    0.057
## perf[3]          13.155   2.823    7.646   11.266   13.134   15.038   18.734
## sigma2Perf        7.878   0.691    6.642    7.389    7.841    8.313    9.354
## sigma2Use       276.204  23.532  234.072  259.694  274.826  291.087  326.086
## use.intercept    50.755   0.994   48.828   50.079   50.750   51.421   52.727
## use[8]           50.822  16.230   19.172   39.953   50.746   61.586   83.051
## deviance       3660.522   4.527 3652.071 3657.498 3660.323 3663.348 3670.200
##                 Rhat n.eff
## perf.intercept 1.002  1900
## perf.use       1.002  1700
## perf[3]        1.001 20000
## sigma2Perf     1.001 20000
## sigma2Use      1.001 20000
## use.intercept  1.001 20000
## use[8]         1.001 20000
## deviance       1.001  5200
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule: pV = var(deviance)/2)
## pV = 10.2 and DIC = 3670.8
## DIC is an estimate of expected predictive error (lower deviance is better).
# Trace and density plots (coda) for visual convergence checks
plot(as.mcmc(model01c.jags.samples))

Model 2: Adding Predictors

Model Description

Model 2 expands on Model 1 by adding female and cc10 as predictors for both perf and use. The lavaan model estimates the coefficients for these predictors. The JAGS model mirrors this, including priors for the coefficients of female and cc10.

lavaan Specification (model02a)

# Introduce ~10% missingness (MCAR) in the female indicator.
# Fixes: (1) removed the no-op `data02 = data02`; (2) sample WITHOUT
# replacement so the drawn row indices are unique and the target fraction
# is actually hit -- with replace = TRUE, duplicate draws made the
# realized missingness rate fall below 10%; (3) seq_len() instead of 1:n.
data02$female[sample(x = seq_len(nrow(data02)), replace = FALSE, size = .1*nrow(data02))] = NA

# Model 2: female and cc10 predict both perf and use; residual variances
# and the residual covariance are estimated.
model02a.lavaan.syntax = "

#Means:
perf ~ 1 + female + cc10
use  ~ 1 + female + cc10

#Variances:
perf ~~ perf
use  ~~ use

#Covariance:
perf ~~ use

"

# With the default fixed.x = TRUE, lavaan listwise-deletes cases that are
# missing on the exogenous predictors female/cc10 -- see the warning
# below (124 cases dropped), which motivates the Bayesian treatment
model02.lavaan.fit = lavaan(model02a.lavaan.syntax, data=data02, estimator = "MLR", mimic = "MPLUS")
## Warning: lavaan->lav_data_full():  
##    124 cases were deleted due to missing values in exogenous variable(s), 
##    while fixed.x = TRUE.
# Full output: fit indices plus standardized parameter estimates
summary(model02.lavaan.fit, standardized=TRUE, fit.measures=TRUE)
## lavaan 0.6-19 ended normally after 35 iterations
## 
##   Estimator                                         ML
##   Optimization method                           NLMINB
##   Number of model parameters                         9
## 
##                                                   Used       Total
##   Number of observations                           226         350
##   Number of missing patterns                         4            
## 
## Model Test User Model:
##                                               Standard      Scaled
##   Test Statistic                                 0.000       0.000
##   Degrees of freedom                                 0           0
## 
## Model Test Baseline Model:
## 
##   Test statistic                                29.334      30.182
##   Degrees of freedom                                 5           5
##   P-value                                        0.000       0.000
##   Scaling correction factor                                  0.972
## 
## User Model versus Baseline Model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.000       1.000
##                                                                   
##   Robust Comparative Fit Index (CFI)                         1.000
##   Robust Tucker-Lewis Index (TLI)                            1.000
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)              -1183.296   -1183.296
##   Loglikelihood unrestricted model (H1)      -1183.296   -1183.296
##                                                                   
##   Akaike (AIC)                                2384.592    2384.592
##   Bayesian (BIC)                              2415.377    2415.377
##   Sample-size adjusted Bayesian (SABIC)       2386.854    2386.854
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000          NA
##   90 Percent confidence interval - lower         0.000          NA
##   90 Percent confidence interval - upper         0.000          NA
##   P-value H_0: RMSEA <= 0.050                       NA          NA
##   P-value H_0: RMSEA >= 0.080                       NA          NA
##                                                                   
##   Robust RMSEA                                               0.000
##   90 Percent confidence interval - lower                     0.000
##   90 Percent confidence interval - upper                     0.000
##   P-value H_0: Robust RMSEA <= 0.050                            NA
##   P-value H_0: Robust RMSEA >= 0.080                            NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.000       0.000
## 
## Parameter Estimates:
## 
##   Standard errors                             Sandwich
##   Information bread                           Observed
##   Observed information based on                Hessian
## 
## Regressions:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   perf ~                                                                
##     female           -1.722    0.398   -4.327    0.000   -1.722   -0.297
##     cc10              0.081    0.031    2.599    0.009    0.081    0.191
##   use ~                                                                 
##     female           -1.468    2.679   -0.548    0.584   -1.468   -0.042
##     cc10              0.023    0.173    0.134    0.893    0.023    0.009
## 
## Covariances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##  .perf ~~                                                               
##    .use               7.312    3.474    2.105    0.035    7.312    0.164
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .perf             14.645    0.298   49.095    0.000   14.645    5.197
##    .use              51.483    2.220   23.188    0.000   51.483    3.044
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .perf              6.954    0.742    9.370    0.000    6.954    0.876
##    .use             285.504   33.415    8.544    0.000  285.504    0.998

R2jags Specification (model02a)

# JAGS version of Model 2. NOTE: female and cc10 appear only on the
# right-hand side of the mean models and are never given distributions,
# so this specification cannot handle their missing values (the reason
# the jags() call below is commented out; model02b fixes this).
model02a.jags.syntax = function(){
  
  # model for use: -----------------
  
  for (obs in 1:N){
    # use mean model: 
    muUse[obs] <- use.intercept + use.female*female[obs] + use.cc10*cc10[obs]
    
    # use model data distribution
    use[obs] ~ dnorm(muUse[obs], tauUse)  
  }
  
  # use prior distributions -- empirical prior for regression parameters
  # (intercept and both slopes share estimated hyperparameters)
  use.intercept ~ dnorm(useMean0, usePrecision0)
  use.female ~ dnorm(useMean0, usePrecision0)
  use.cc10 ~ dnorm(useMean0, usePrecision0)
  
  # hyperpriors for the shared prior mean and precision
  useMean0 ~ dnorm(0, 1/1000)
  usePrecision0 ~ dgamma(1, 1)
  
  tauUse ~ dgamma(use.tau.shape0, use.tau.rate0)
  
  # use variance
  sigma2Use <- 1/tauUse
  
  # model for perf: -----------------
  
  # perf model data distribution
  for (obs in 1:N){
    # perf mean model
    
    muPerf[obs] <- perf.intercept + perf.female*female[obs] + perf.cc10*cc10[obs]
    perf[obs] ~ dnorm(muPerf[obs], tauPerf)    
  }
  

  # perf prior distributions -- same empirical-prior structure as use
  perf.intercept ~ dnorm(perfMean0, perfPrecision0)
  perf.female ~ dnorm(perfMean0, perfPrecision0)
  perf.cc10 ~ dnorm(perfMean0, perfPrecision0)
  
  perfMean0 ~ dnorm(0, 1/1000)
  perfPrecision0 ~ dgamma(1, 1)
  
  tauPerf ~ dgamma(perf.tau.shape0, perf.tau.rate0)
  
  # perf residual variance
  sigma2Perf <- 1/tauPerf
  

}

# Prepare data for JAGS -- here perf and use are separate variables
model02a.jags.data = list(
  use = data02$use,
  perf = data02$perf,
  female = data02$female,
  cc10 = data02$cc10,
  N = nrow(data02),
  use.tau.shape0 = 1,
  use.tau.rate0 = 1,
  perf.tau.shape0 = 1,
  perf.tau.rate0 = 1
) 

# Monitored parameters: regression coefficients, variances, and two
# individual data nodes (perf[3], use[8])
model02a.jags.params = c(
  "use.intercept", 
  "use.female",
  "use.cc10",
  "sigma2Use", 
  "perf.intercept", 
  "perf.female",
  "perf.cc10",
  "sigma2Perf",
  "perf[3]",
  "use[8]"
)

# Run the model using the R2jags package
# model02a.jags.samples = jags(
#   data = model02a.jags.data,
#   parameters.to.save = model02a.jags.params,
#   model.file = model02a.jags.syntax,
#   n.chains = 4,
#   n.iter = 6000,
#   n.burnin = 1000,
#   n.thin = 1
# )

R2jags Specification (model02b)

model02a.jags.syntax fails when the predictors contain missing values, because female and cc10 appear only on the right-hand side of the mean models and are never assigned distributions, so JAGS has no way to impute them. model02b.jags.syntax addresses this by giving female a Bernoulli distribution and cc10 a normal distribution in the likelihood, with priors on the parameters of those predictor distributions.

# Model 2b: like model02a, but the predictors female (Bernoulli) and
# cc10 (normal) are given their own distributions, so JAGS can impute
# their missing values instead of failing.
# Consistency fix: the two deterministic nodes useSigma2_0/perfSigma2_0
# used `=`; changed to `<-`, matching every other deterministic node in
# the JAGS code (e.g. sigma2Use <- 1/tauUse).
model02b.jags.syntax = function(){
  
  # model for female -------------
  # Bernoulli marginal model so missing female values become
  # unobserved stochastic nodes
  for (obs in 1:N){
    female[obs] ~ dbern(pFemale)  
  }
  
  # priors for female (Beta(1,1) = uniform on the probability)
  pFemale ~ dbeta(1, 1)
  
  # model for cc10: normal marginal model, likewise imputing missing cc10
  for (obs in 1:N){
    cc10[obs] ~ dnorm(muCC10, tauCC10)  
  }
  
  
  # priors for cc10
  muCC10 ~ dnorm(0, 1/1000)
  tauCC10 ~ dgamma(1, 1)
  
  # model for use: -----------------
  
  for (obs in 1:N){
    # use mean model: 
    muUse[obs] <- use.intercept + use.female*female[obs] + use.cc10*cc10[obs]
    
    # use model data distribution
    use[obs] ~ dnorm(muUse[obs], tauUse)  
  }
  
  # use prior distributions -- empirical prior for regression parameters
  # (intercept and both slopes share estimated hyperparameters)
  use.intercept ~ dnorm(useMean0, usePrecision0)
  use.female ~ dnorm(useMean0, usePrecision0)
  use.cc10 ~ dnorm(useMean0, usePrecision0)
  
  useMean0 ~ dnorm(0, 1/1000)
  usePrecision0 ~ dgamma(1, 1)
  # prior variance implied by the estimated prior precision
  useSigma2_0 <- 1/usePrecision0
  
  tauUse ~ dgamma(use.tau.shape0, use.tau.rate0)
  
  # use variance
  sigma2Use <- 1/tauUse
  
  # model for perf: -----------------
  
  # perf model data distribution
  for (obs in 1:N){
    # perf mean model
    muPerf[obs] <- perf.intercept + perf.female*female[obs] + perf.cc10*cc10[obs]
    perf[obs] ~ dnorm(muPerf[obs], tauPerf)    
  }
  
  
  # perf prior distributions -- same empirical-prior structure as use
  perf.intercept ~ dnorm(perfMean0, perfPrecision0)
  perf.female ~ dnorm(perfMean0, perfPrecision0)
  perf.cc10 ~ dnorm(perfMean0, perfPrecision0)
  
  perfMean0 ~ dnorm(0, 1/1000)
  perfPrecision0 ~ dgamma(1, 1)
  # prior variance implied by the estimated prior precision
  perfSigma2_0 <- 1/perfPrecision0
  
  tauPerf ~ dgamma(perf.tau.shape0, perf.tau.rate0)
  
  # perf residual variance
  sigma2Perf <- 1/tauPerf
  
  
}

# Prepare data for JAGS -- here perf and use are separate variables
model02b.jags.data = list(
  use = data02$use,
  perf = data02$perf,
  female = data02$female,
  cc10 = data02$cc10,
  N = nrow(data02),
  use.tau.shape0 = 1,
  use.tau.rate0 = 1,
  perf.tau.shape0 = 1,
  perf.tau.rate0 = 1
) 

# Monitored parameters: predictor-model parameters, regression
# coefficients, hyperparameters, and two individual data nodes
model02b.jags.params = c(
  "pFemale",
  "muCC10",
  "tauCC10",
  "use.intercept", 
  "use.female",
  "use.cc10",
  "useMean0",
  "useSigma2_0",
  "sigma2Use", 
  "perf.intercept", 
  "perf.female",
  "perf.cc10",
  "sigma2Perf",
  "perfMean0",
  "perfSigma2_0",
  "perf[3]",
  "use[8]"
)

# Run the model using the R2jags package
model02b.jags.samples = jags(
  data = model02b.jags.data,
  parameters.to.save = model02b.jags.params,
  model.file = model02b.jags.syntax,
  n.chains = 4,
  n.iter = 6000,
  n.burnin = 1000,
  n.thin = 1
)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 1108
##    Unobserved stochastic nodes: 307
##    Total graph size: 2898
## 
## Initializing model
# Print posterior summaries, Rhat, and effective sample sizes
model02b.jags.samples 
## Inference for Bugs model at "/var/folders/9l/vt76dbhs4m98bljbrl850qsm0000gn/T//Rtmp1xBP6I/model7af31cd68a6.txt", fit using jags,
##  4 chains, each with 6000 iterations (first 1000 discarded)
##  n.sims = 20000 iterations saved. Running time = 2.021 secs
##                 mu.vect  sd.vect     2.5%      25%      50%      75%    97.5%
## muCC10           -0.216    0.420   -1.038   -0.501   -0.213    0.066    0.612
## pFemale           0.644    0.027    0.591    0.626    0.644    0.662    0.696
## perf.cc10         0.089    0.030    0.030    0.069    0.089    0.110    0.146
## perf.female      -1.397    0.353   -2.091   -1.632   -1.398   -1.162   -0.698
## perf.intercept   14.735    0.276   14.198   14.545   14.734   14.918   15.285
## perfMean0         4.366    4.951   -5.592    1.728    4.370    7.087   14.060
## perfSigma2_0     78.812  137.891   14.487   29.897   47.775   82.787  327.789
## perf[3]          15.080    2.731    9.776   13.248   15.079   16.939   20.344
## sigma2Perf        7.435    0.663    6.249    6.973    7.399    7.859    8.844
## sigma2Use       276.246   23.912  233.346  259.710  274.810  291.288  327.644
## tauCC10           0.023    0.002    0.019    0.022    0.023    0.024    0.027
## use.cc10          0.043    0.175   -0.299   -0.075    0.043    0.162    0.387
## use.female       -2.432    2.218   -6.736   -3.950   -2.435   -0.944    1.960
## use.intercept    52.090    1.741   48.682   50.931   52.085   53.255   55.470
## useMean0         13.340   13.933  -16.850    5.191   13.963   22.024   39.982
## useSigma2_0     855.634 2761.590  166.368  341.090  537.722  926.372 3205.200
## use[8]           52.370   16.672   19.753   41.215   52.364   63.750   84.879
## deviance       5679.809    7.299 5665.646 5674.943 5679.719 5684.571 5694.616
##                 Rhat n.eff
## muCC10         1.001 20000
## pFemale        1.001 20000
## perf.cc10      1.001 20000
## perf.female    1.001 20000
## perf.intercept 1.001 20000
## perfMean0      1.001 20000
## perfSigma2_0   1.001 20000
## perf[3]        1.001 15000
## sigma2Perf     1.001  8300
## sigma2Use      1.001 14000
## tauCC10        1.001 20000
## use.cc10       1.001 15000
## use.female     1.001  6300
## use.intercept  1.001  8700
## useMean0       1.001  5700
## useSigma2_0    1.001 19000
## use[8]         1.001  6000
## deviance       1.001 18000
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule: pV = var(deviance)/2)
## pV = 26.6 and DIC = 5706.4
## DIC is an estimate of expected predictive error (lower deviance is better).

Model 3: Adding Interactions

Model Description

Model 3 adds an interaction term (cc10 x female) to both the perf and use equations in the JAGS model. This allows the effect of cc10 to vary depending on female, and vice versa. There is no equivalent lavaan model provided in the code.

R2jags Specification (model03a)

# JAGS syntax for Model 3: Bayesian regressions of perf and use on female,
# cc10, and their interaction (cc10 x female).  The incomplete predictors
# female and cc10 get their own marginal distributions so that JAGS imputes
# their missing values during sampling.  Regression coefficients receive an
# "empirical" prior whose mean and precision are themselves estimated.
# NOTE: node names here are the external interface -- they are listed in
# model03a.jags.params, so renaming any node would break the monitoring.
model03a.jags.syntax = function(){
  
  # model for female: Bernoulli marginal (imputes missing female values) ---
  for (obs in 1:N){
    female[obs] ~ dbern(pFemale)  
  }
  
  # prior for female: uniform Beta(1, 1) on the probability female == 1
  pFemale ~ dbeta(1, 1)
  
  # model for cc10: normal marginal (imputes missing cc10 values)
  for (obs in 1:N){
    cc10[obs] ~ dnorm(muCC10, tauCC10)  
  }
  
  
  # priors for cc10: diffuse normal mean, Gamma(1, 1) precision
  # (JAGS dnorm is parameterized by precision, not variance)
  muCC10 ~ dnorm(0, 1/1000)
  tauCC10 ~ dgamma(1, 1)
  
  # model for use: -----------------
  
  for (obs in 1:N){
    # use mean model: main effects plus the cc10 x female interaction
    muUse[obs] <- use.intercept + use.female*female[obs] + use.cc10*cc10[obs] + 
      use.cc10xfemale*cc10[obs]*female[obs]
    
    # use model data distribution (tauUse is a precision)
    use[obs] ~ dnorm(muUse[obs], tauUse)  
  }
  
  # use prior distributions -- empirical prior: all four coefficients share
  # an estimated prior mean (useMean0) and precision (usePrecision0)
  use.intercept ~ dnorm(useMean0, usePrecision0)
  use.female ~ dnorm(useMean0, usePrecision0)
  use.cc10 ~ dnorm(useMean0, usePrecision0)
  use.cc10xfemale ~ dnorm(useMean0, usePrecision0)
  
  # hyperpriors for the shared coefficient prior
  useMean0 ~ dnorm(0, 1/1000)
  usePrecision0 ~ dgamma(1, 1)
  useSigma2_0 = 1/usePrecision0
  
  # residual precision; its Gamma shape/rate come in via the data list
  tauUse ~ dgamma(use.tau.shape0, use.tau.rate0)
  
  # use variance (reported on the variance scale instead of precision)
  sigma2Use <- 1/tauUse
  
  # model for perf: -----------------
  
  # perf model data distribution
  for (obs in 1:N){
    # perf mean model: main effects plus the cc10 x female interaction
    muPerf[obs] <- perf.intercept + perf.female*female[obs] + perf.cc10*cc10[obs] + 
      perf.cc10xfemale*cc10[obs]*female[obs]
    perf[obs] ~ dnorm(muPerf[obs], tauPerf)    
  }
  
  
  # perf prior distributions -- same empirical-prior structure as for use
  perf.intercept ~ dnorm(perfMean0, perfPrecision0)
  perf.female ~ dnorm(perfMean0, perfPrecision0)
  perf.cc10 ~ dnorm(perfMean0, perfPrecision0)
  perf.cc10xfemale ~ dnorm(perfMean0, perfPrecision0)
  
  # hyperpriors for the shared coefficient prior
  perfMean0 ~ dnorm(0, 1/1000)
  perfPrecision0 ~ dgamma(1, 1)
  perfSigma2_0 = 1/perfPrecision0
  
  # residual precision; its Gamma shape/rate come in via the data list
  tauPerf ~ dgamma(perf.tau.shape0, perf.tau.rate0)
  
  # perf residual variance
  sigma2Perf <- 1/tauPerf
  
  
}

# Data list handed to JAGS for Model 3.  perf and use enter as separate
# outcome vectors; the Gamma hyperparameters for the two residual
# precisions are fixed at shape = rate = 1.
model03a.jags.data <- with(data02, list(
  use = use,
  perf = perf,
  female = female,
  cc10 = cc10,
  N = nrow(data02),
  use.tau.shape0 = 1,
  use.tau.rate0 = 1,
  perf.tau.shape0 = 1,
  perf.tau.rate0 = 1
))

# Nodes to monitor for Model 3, grouped by sub-model; c() flattens the
# groups into one character vector.  perf[3] and use[8] are two imputed
# data values tracked as examples.
model03a.jags.params <- c(
  # marginal models for the incomplete predictors
  c("pFemale", "muCC10", "tauCC10"),
  # use equation and its hyperparameters
  c("use.intercept", "use.female", "use.cc10", "use.cc10xfemale",
    "useMean0", "useSigma2_0", "sigma2Use"),
  # perf equation and its hyperparameters
  c("perf.intercept", "perf.female", "perf.cc10", "perf.cc10xfemale",
    "sigma2Perf", "perfMean0", "perfSigma2_0"),
  # example imputed data points
  c("perf[3]", "use[8]")
)

# Fit Model 3 with R2jags: 4 chains of 6000 iterations each, discarding
# the first 1000 per chain as burn-in and keeping every draw (no thinning),
# for 4 x 5000 = 20000 retained samples.
model03a.jags.samples <- jags(
  model.file = model03a.jags.syntax,
  data = model03a.jags.data,
  parameters.to.save = model03a.jags.params,
  n.chains = 4,
  n.iter = 6000,
  n.burnin = 1000,
  n.thin = 1
)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 1108
##    Unobserved stochastic nodes: 309
##    Total graph size: 3600
## 
## Initializing model
model03a.jags.samples 
## Inference for Bugs model at "/var/folders/9l/vt76dbhs4m98bljbrl850qsm0000gn/T//Rtmp1xBP6I/model7af31973e14d.txt", fit using jags,
##  4 chains, each with 6000 iterations (first 1000 discarded)
##  n.sims = 20000 iterations saved. Running time = 2.929 secs
##                   mu.vect sd.vect     2.5%      25%      50%      75%    97.5%
## muCC10             -0.206   0.424   -1.037   -0.494   -0.209    0.079    0.620
## pFemale             0.642   0.026    0.590    0.625    0.643    0.660    0.694
## perf.cc10           0.055   0.043   -0.029    0.026    0.055    0.084    0.139
## perf.cc10xfemale    0.068   0.060   -0.052    0.027    0.067    0.108    0.184
## perf.female        -1.389   0.350   -2.076   -1.627   -1.391   -1.151   -0.713
## perf.intercept     14.722   0.273   14.188   14.536   14.720   14.907   15.255
## perfMean0           3.360   3.662   -3.983    1.215    3.360    5.468   10.825
## perfSigma2_0       56.626  61.662   13.642   26.324   39.757   64.326  204.609
## perf[3]            14.927   2.738    9.573   13.083   14.952   16.795   20.298
## sigma2Perf          7.366   0.662    6.178    6.901    7.332    7.789    8.758
## sigma2Use         275.362  23.713  233.124  258.776  273.837  290.353  325.860
## tauCC10             0.023   0.002    0.019    0.022    0.023    0.024    0.027
## use.cc10           -0.158   0.269   -0.689   -0.338   -0.157    0.023    0.370
## use.cc10xfemale     0.361   0.361   -0.348    0.117    0.361    0.606    1.066
## use.female         -2.275   2.219   -6.634   -3.763   -2.261   -0.788    2.086
## use.intercept      51.991   1.735   48.595   50.820   51.985   53.151   55.398
## useMean0           10.887  11.404  -12.707    4.049   11.027   17.898   33.267
## useSigma2_0       656.524 687.981  158.101  307.533  471.570  767.245 2273.740
## use[8]             51.299  16.829   18.575   39.820   51.235   62.585   84.910
## deviance         5676.145   8.500 5659.599 5670.422 5676.169 5681.773 5692.829
##                   Rhat n.eff
## muCC10           1.001 20000
## pFemale          1.001 20000
## perf.cc10        1.002  3700
## perf.cc10xfemale 1.001  5700
## perf.female      1.001 20000
## perf.intercept   1.001 20000
## perfMean0        1.001 20000
## perfSigma2_0     1.001 20000
## perf[3]          1.001 20000
## sigma2Perf       1.001  8800
## sigma2Use        1.001 13000
## tauCC10          1.001  6500
## use.cc10         1.001 12000
## use.cc10xfemale  1.001  7800
## use.female       1.001 20000
## use.intercept    1.001 20000
## useMean0         1.001 20000
## useSigma2_0      1.001 13000
## use[8]           1.001 19000
## deviance         1.002  2000
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule: pV = var(deviance)/2)
## pV = 36.1 and DIC = 5712.2
## DIC is an estimate of expected predictive error (lower deviance is better).

Model 4: Adding Auxiliary Variables

Model Description

Model 4 expands the model to include additional auxiliary variables (hsl, msc, mas, and mse). Critically, because these auxiliary variables also have missing data, the JAGS model now includes a joint model for all of the variables with missingness (female, cc10, hsl, msc, mas, and mse). This joint model is factored into a series of regressions.

R2jags Specification (model04a)

# JAGS syntax for Model 4: the Model 3 analysis (perf and use on female,
# cc10, and cc10 x female) augmented with the auxiliary variables hsl, msc,
# mas, and mse.  Because all six covariates have missing data, their joint
# distribution is factored into a chain of regressions
# (hsl -> msc -> mas -> mse -> female -> cc10), so JAGS imputes every
# missing value from the full joint model.
# Consistency fix: the mas coefficient-prior precision node was spelled
# "taoMAS0" in the original; it is renamed to tauMAS0 to match tauMSC0,
# tauMSE0, tauFemale0, and tauCC10_0.  The node is internal and never
# monitored, so the fitted model is unchanged.
model04a.jags.syntax = function(){
  
  # model for hsl: marginal normal (first factor of the joint model) ------

  for (obs in 1:N){
    hsl[obs] ~ dnorm(muHSL, tauHSL)  
  }
  
  # priors for hsl: diffuse normal mean, Gamma(1, 1) precision
  muHSL ~ dnorm(0, 1/1000)
  tauHSL ~ dgamma(1, 1)
  
  # model for msc given hsl ---------------
  
  for (obs in 1:N){
    mscMean[obs] <- msc.intercept + msc.hsl*hsl[obs]
    msc[obs] ~ dnorm(mscMean[obs], tauMSC)
  }
  
  # priors for msc -- empirical prior: coefficients share muMSC0/tauMSC0
  msc.intercept ~ dnorm(muMSC0, tauMSC0)
  msc.hsl ~ dnorm(muMSC0, tauMSC0)
  muMSC0 ~ dnorm(0, 1/1000)
  tauMSC0 ~ dgamma(1, 1)
  tauMSC ~ dgamma(1, 1)
  
  # model for mas given hsl and msc ---------------
  
  for (obs in 1:N){
    masMean[obs] <- mas.intercept + mas.hsl*hsl[obs] + mas.msc*msc[obs]
    mas[obs] ~ dnorm(masMean[obs], tauMAS)
  }
  
  # priors for mas (hyperprior precision renamed from "taoMAS0" to tauMAS0)
  mas.intercept ~ dnorm(muMAS0, tauMAS0)
  mas.hsl ~ dnorm(muMAS0, tauMAS0)
  mas.msc ~ dnorm(muMAS0, tauMAS0)
  muMAS0 ~ dnorm(0, 1/1000)
  tauMAS0 ~ dgamma(1, 1)
  tauMAS ~ dgamma(1, 1)
  
  # model for mse given hsl, msc, and mas ---------------
  
  for (obs in 1:N){
    mseMean[obs] <- mse.intercept + mse.hsl*hsl[obs] + mse.msc*msc[obs] + mse.mas*mas[obs]
    mse[obs] ~ dnorm(mseMean[obs], tauMSE)
  }
  
  # priors for mse -- empirical prior with shared mean/precision
  mse.intercept ~ dnorm(muMSE0, tauMSE0)
  mse.hsl ~ dnorm(muMSE0, tauMSE0)
  mse.msc ~ dnorm(muMSE0, tauMSE0)
  mse.mas ~ dnorm(muMSE0, tauMSE0)
  muMSE0 ~ dnorm(0, 1/1000)
  tauMSE0 ~ dgamma(1, 1)
  tauMSE ~ dgamma(1, 1)
  
    
  # model for female given the auxiliaries: logistic regression -----------
  for (obs in 1:N){
    pFemale[obs] = ilogit(female.intercept + female.hsl*hsl[obs] + 
                            female.msc*msc[obs] + female.mas*mas[obs] + female.mse*mse[obs])
    female[obs] ~ dbern(pFemale[obs])
  }
  
  # priors for female -- empirical prior with shared mean/precision
  female.intercept ~ dnorm(muFemale0, tauFemale0)
  female.hsl ~ dnorm(muFemale0, tauFemale0)
  female.msc ~ dnorm(muFemale0, tauFemale0)
  female.mas ~ dnorm(muFemale0, tauFemale0)
  female.mse ~ dnorm(muFemale0, tauFemale0)
  muFemale0 ~ dnorm(0, 1/1000)
  tauFemale0 ~ dgamma(1, 1)
  
  
  # model for cc10 given the auxiliaries
  for (obs in 1:N){
    muCC10[obs] = cc10.intercept + cc10.hsl*hsl[obs] + cc10.msc*msc[obs] + 
      cc10.mas*mas[obs] + cc10.mse*mse[obs]
    cc10[obs] ~ dnorm(muCC10[obs], tauCC10)  
  }
  
  
  # priors for cc10 -- empirical prior with shared mean/precision
  cc10.intercept ~ dnorm(muCC10_0, tauCC10_0)
  cc10.hsl ~ dnorm(muCC10_0, tauCC10_0)
  cc10.msc ~ dnorm(muCC10_0, tauCC10_0)
  cc10.mas ~ dnorm(muCC10_0, tauCC10_0)
  cc10.mse ~ dnorm(muCC10_0, tauCC10_0)
  muCC10_0 ~ dnorm(0, 1/1000)
  tauCC10_0 ~ dgamma(1, 1)
  
  # residual precision for cc10
  tauCC10 ~ dgamma(1, 1)
  
  # model for use: -----------------
  
  for (obs in 1:N){
    # use mean model: focal effects, the interaction, and the auxiliaries
    muUse[obs] <- use.intercept + use.female*female[obs] + use.cc10*cc10[obs] + 
      use.cc10xfemale*cc10[obs]*female[obs] + use.hsl*hsl[obs] + use.msc*msc[obs] + 
      use.mas*mas[obs] + use.mse*mse[obs]
    
    # use model data distribution (tauUse is a precision)
    use[obs] ~ dnorm(muUse[obs], tauUse)  
  }
  
  # use prior distributions -- empirical prior for regression parameters
  use.intercept ~ dnorm(useMean0, usePrecision0)
  use.female ~ dnorm(useMean0, usePrecision0)
  use.cc10 ~ dnorm(useMean0, usePrecision0)
  use.cc10xfemale ~ dnorm(useMean0, usePrecision0)
  use.hsl ~ dnorm(useMean0, usePrecision0)
  use.msc ~ dnorm(useMean0, usePrecision0)
  use.mas ~ dnorm(useMean0, usePrecision0)
  use.mse ~ dnorm(useMean0, usePrecision0)
  
  # hyperpriors for the shared coefficient prior
  useMean0 ~ dnorm(0, 1/1000)
  usePrecision0 ~ dgamma(1, 1)
  useSigma2_0 = 1/usePrecision0
  
  # residual precision; its Gamma shape/rate come in via the data list
  tauUse ~ dgamma(use.tau.shape0, use.tau.rate0)
  
  # use variance (reported on the variance scale)
  sigma2Use <- 1/tauUse
  
  # model for perf: -----------------
  
  # perf model data distribution
  for (obs in 1:N){
    # perf mean model: focal effects, the interaction, and the auxiliaries
    muPerf[obs] <- perf.intercept + perf.female*female[obs] + perf.cc10*cc10[obs] + 
      perf.cc10xfemale*cc10[obs]*female[obs] + perf.hsl*hsl[obs] + perf.msc*msc[obs] +
      perf.mas*mas[obs] + perf.mse*mse[obs]
    perf[obs] ~ dnorm(muPerf[obs], tauPerf)    
  }
  
  
  # perf prior distributions -- same empirical-prior structure as for use
  perf.intercept ~ dnorm(perfMean0, perfPrecision0)
  perf.female ~ dnorm(perfMean0, perfPrecision0)
  perf.cc10 ~ dnorm(perfMean0, perfPrecision0)
  perf.cc10xfemale ~ dnorm(perfMean0, perfPrecision0)
  perf.hsl ~ dnorm(perfMean0, perfPrecision0)
  perf.msc ~ dnorm(perfMean0, perfPrecision0)
  perf.mas ~ dnorm(perfMean0, perfPrecision0)
  perf.mse ~ dnorm(perfMean0, perfPrecision0)
  
  # hyperpriors for the shared coefficient prior
  perfMean0 ~ dnorm(0, 1/1000)
  perfPrecision0 ~ dgamma(1, 1)
  perfSigma2_0 = 1/perfPrecision0
  
  # residual precision; its Gamma shape/rate come in via the data list
  tauPerf ~ dgamma(perf.tau.shape0, perf.tau.rate0)
  
  # perf residual variance
  sigma2Perf <- 1/tauPerf
  
  
}

# Data list handed to JAGS for Model 4: the Model 3 variables plus the four
# auxiliary variables (hsl, msc, mas, mse).  Residual-precision Gamma
# hyperparameters remain fixed at shape = rate = 1.
model04a.jags.data <- with(data02, list(
  use = use,
  perf = perf,
  female = female,
  cc10 = cc10,
  hsl = hsl,
  msc = msc,
  mas = mas,
  mse = mse,
  N = nrow(data02),
  use.tau.shape0 = 1,
  use.tau.rate0 = 1,
  perf.tau.shape0 = 1,
  perf.tau.rate0 = 1
))

# Nodes to monitor for Model 4, grouped by factor of the joint model;
# c() flattens the groups into one character vector.  perf[3] and use[8]
# are two imputed data values tracked as examples.
model04a.jags.params <- c(
  # hsl marginal model
  c("muHSL", "tauHSL"),
  # msc given hsl
  c("msc.intercept", "msc.hsl", "tauMSC"),
  # mas given hsl, msc
  c("mas.intercept", "mas.hsl", "mas.msc", "tauMAS"),
  # mse given hsl, msc, mas
  c("mse.intercept", "mse.hsl", "mse.msc", "mse.mas", "tauMSE"),
  # female given the auxiliaries (logistic regression)
  c("female.intercept", "female.hsl", "female.msc", "female.mas",
    "female.mse"),
  # cc10 given the auxiliaries
  c("cc10.intercept", "cc10.hsl", "cc10.msc", "cc10.mas", "cc10.mse",
    "muCC10_0", "tauCC10_0"),
  # use equation and its hyperparameters
  c("use.intercept", "use.female", "use.cc10", "use.cc10xfemale",
    "use.hsl", "use.msc", "use.mas", "use.mse",
    "useMean0", "useSigma2_0", "sigma2Use"),
  # perf equation and its hyperparameters
  c("perf.intercept", "perf.female", "perf.cc10", "perf.cc10xfemale",
    "perf.hsl", "perf.msc", "perf.mas", "perf.mse",
    "sigma2Perf", "perfMean0", "perfSigma2_0"),
  # example imputed data points
  c("perf[3]", "use[8]")
)

# Fit Model 4 with R2jags: 4 chains of 6000 iterations each, discarding
# the first 1000 per chain as burn-in and keeping every draw (no thinning),
# for 4 x 5000 = 20000 retained samples.
model04a.jags.samples <- jags(
  model.file = model04a.jags.syntax,
  data = model04a.jags.data,
  parameters.to.save = model04a.jags.params,
  n.chains = 4,
  n.iter = 6000,
  n.burnin = 1000,
  n.thin = 1
)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 2508
##    Unobserved stochastic nodes: 349
##    Total graph size: 14840
## 
## Initializing model
model04a.jags.samples 
## Inference for Bugs model at "/var/folders/9l/vt76dbhs4m98bljbrl850qsm0000gn/T//Rtmp1xBP6I/model7af36fe02549.txt", fit using jags,
##  4 chains, each with 6000 iterations (first 1000 discarded)
##  n.sims = 20000 iterations saved. Running time = 13.298 secs
##                    mu.vect sd.vect      2.5%       25%       50%       75%
## cc10.hsl             0.384   0.399    -0.392     0.119     0.365     0.644
## cc10.intercept      -2.007   2.442    -8.802    -2.741    -1.203    -0.420
## cc10.mas            -0.033   0.078    -0.192    -0.084    -0.030     0.020
## cc10.msc             0.046   0.051    -0.045     0.009     0.044     0.080
## cc10.mse            -0.017   0.047    -0.101    -0.047    -0.020     0.010
## female.hsl           0.271   0.130     0.018     0.182     0.270     0.358
## female.intercept     3.606   1.002     1.710     2.928     3.586     4.262
## female.mas          -0.066   0.024    -0.114    -0.082    -0.066    -0.050
## female.msc           0.025   0.016    -0.006     0.014     0.025     0.035
## female.mse          -0.046   0.017    -0.081    -0.057    -0.046    -0.034
## mas.hsl              0.312   0.257    -0.222     0.149     0.319     0.488
## mas.intercept        1.725   1.045    -0.055     0.989     1.620     2.349
## mas.msc              0.572   0.020     0.534     0.559     0.572     0.586
## msc.hsl              8.062   0.481     6.871     7.821     8.159     8.394
## msc.intercept       10.350   2.363     7.066     8.726     9.782    11.460
## mse.hsl              2.373   0.417     1.571     2.090     2.363     2.642
## mse.intercept       43.677   1.949    39.800    42.370    43.688    45.016
## mse.mas              0.263   0.076     0.111     0.212     0.263     0.314
## mse.msc              0.195   0.052     0.089     0.161     0.195     0.230
## muCC10_0            -0.329   0.860    -2.523    -0.584    -0.178     0.127
## muHSL                4.883   0.063     4.760     4.841     4.883     4.926
## perf.cc10            0.002   0.030    -0.057    -0.019     0.002     0.022
## perf.cc10xfemale    -0.005   0.042    -0.088    -0.034    -0.005     0.023
## perf.female          0.083   0.225    -0.358    -0.067     0.084     0.235
## perf.hsl             0.129   0.119    -0.099     0.047     0.129     0.212
## perf.intercept       0.330   0.500    -0.617    -0.001     0.311     0.641
## perf.mas             0.037   0.020    -0.004     0.024     0.038     0.051
## perf.msc             0.008   0.014    -0.019    -0.002     0.007     0.017
## perf.mse             0.154   0.012     0.129     0.146     0.155     0.162
## perfMean0            0.092   0.214    -0.327    -0.043     0.089     0.224
## perfSigma2_0         0.341   0.227     0.119     0.202     0.282     0.407
## perf[3]             12.309   1.921     8.534    11.028    12.309    13.606
## sigma2Perf           3.603   0.314     3.041     3.384     3.588     3.800
## sigma2Use          205.530  17.995   173.506   192.895   204.423   217.056
## tauCC10_0            1.576   1.404     0.057     0.483     1.214     2.275
## tauHSL               0.733   0.055     0.628     0.695     0.731     0.769
## tauMAS               0.032   0.002     0.028     0.031     0.032     0.034
## tauMSC               0.005   0.000     0.004     0.004     0.005     0.005
## tauMSE               0.015   0.001     0.013     0.015     0.015     0.016
## use.cc10            -0.317   0.211    -0.730    -0.458    -0.316    -0.174
## use.cc10xfemale      0.131   0.280    -0.413    -0.058     0.128     0.317
## use.female           0.925   1.000    -0.691     0.258     0.798     1.444
## use.hsl             -0.476   0.642    -1.891    -0.861    -0.439    -0.030
## use.intercept        0.756   1.204    -1.049     0.029     0.590     1.254
## use.mas             -0.221   0.157    -0.532    -0.328    -0.219    -0.114
## use.msc              0.610   0.106     0.408     0.538     0.610     0.679
## use.mse              0.391   0.060     0.274     0.352     0.392     0.430
## useMean0             0.222   0.406    -0.490    -0.023     0.192     0.425
## useSigma2_0          0.940   1.343     0.199     0.393     0.608     1.034
## use[8]              59.401  14.387    31.112    49.784    59.369    69.155
## deviance         14003.276  10.846 13982.758 13995.896 14003.040 14010.186
##                      97.5%  Rhat n.eff
## cc10.hsl             1.194 1.011   270
## cc10.intercept       0.684 1.106    42
## cc10.mas             0.113 1.020   130
## cc10.msc             0.149 1.031    89
## cc10.mse             0.086 1.068    51
## female.hsl           0.528 1.001 15000
## female.intercept     5.643 1.002  1700
## female.mas          -0.020 1.001 19000
## female.msc           0.056 1.001 20000
## female.mse          -0.013 1.002  2400
## mas.hsl              0.793 1.003  1800
## mas.intercept        4.056 1.005  1000
## mas.msc              0.611 1.001 14000
## msc.hsl              8.768 1.014   230
## msc.intercept       16.297 1.012   240
## mse.hsl              3.220 1.005   600
## mse.intercept       47.438 1.008   370
## mse.mas              0.415 1.002  6300
## mse.msc              0.295 1.002  4800
## muCC10_0             0.856 1.053   130
## muHSL                5.005 1.001 16000
## perf.cc10            0.062 1.003  1300
## perf.cc10xfemale     0.077 1.002  1600
## perf.female          0.522 1.001  5500
## perf.hsl             0.363 1.002  2100
## perf.intercept       1.389 1.010   290
## perf.mas             0.076 1.026   100
## perf.msc             0.036 1.041    66
## perf.mse             0.175 1.011   250
## perfMean0            0.527 1.001  5700
## perfSigma2_0         0.911 1.001  4500
## perf[3]             16.027 1.001 17000
## sigma2Perf           4.273 1.001 20000
## sigma2Use          243.626 1.001 20000
## tauCC10_0            5.212 1.057    63
## tauHSL               0.843 1.001 20000
## tauMAS               0.037 1.001 20000
## tauMSC               0.005 1.001 20000
## tauMSE               0.018 1.001 20000
## use.cc10             0.092 1.001  7200
## use.cc10xfemale      0.686 1.002  2000
## use.female           3.293 1.001 11000
## use.hsl              0.666 1.005   660
## use.intercept        3.628 1.002  4600
## use.mas              0.081 1.002  2600
## use.msc              0.824 1.002  2800
## use.mse              0.510 1.005   700
## useMean0             1.128 1.001  7200
## useSigma2_0          3.600 1.001  7400
## use[8]              87.606 1.001 20000
## deviance         14025.615 1.010   290
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule: pV = var(deviance)/2)
## pV = 58.2 and DIC = 14061.5
## DIC is an estimate of expected predictive error (lower deviance is better).