if (!require(lavaan)) install.packages("lavaan")
## Loading required package: lavaan
## This is lavaan 0.6-19
## lavaan is FREE software! Please report any bugs.
library(lavaan)
Example data: 1336 college students self-reporting on 49 items (measuring five factors) assessing childhood maltreatment. Items are answered on a 1–5 scale: 1 = Strongly Disagree, 2 = Disagree, 3 = Neutral, 4 = Agree, 5 = Strongly Agree. The items are NOT normally distributed, so we’ll examine the fit of these models two ways: CFA with MLR and IFA with WLSMV (as an example of how to do each, but NOT to compare between estimators). The five factors are:
1. Spurning: Verbal and nonverbal caregiver acts that reject and degrade a child.
2. Terrorizing: Caregiver behaviors that threaten or are likely to physically hurt, kill, abandon, or place the child or the child’s loved ones or objects in recognizably dangerous situations.
3. Isolating: Caregiver acts that consistently deny the child opportunities to meet needs for interacting or communicating with peers or adults inside or outside the home.
4. Corrupting: Caregiver acts that encourage the child to develop inappropriate behaviors (self-destructive, antisocial, criminal, deviant, or other maladaptive behaviors).
5. Ignoring: Emotional unresponsiveness includes caregiver acts that ignore the child’s attempts and needs to interact (failing to express affection, caring, and love for the child) and show no emotion in interactions with the child.
abuseData = read.csv(file = "abuse.csv", col.names = c("ID", paste0("p0",1:9), paste0("p",10:57)))
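As noted above, the item responses are strongly non-normal; here is a quick, illustrative check of the distributions (a sketch using the column names assigned above):
round(colMeans(abuseData[, -1], na.rm = TRUE), 2)  # most item means sit near the bottom of the 1-5 scale (cf. the intercepts below)
table(abuseData$p06)                               # full response distribution for one example item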
First, we separately build each one-factor model:
spurningSyntax = "
spurn =~ p06 + p10 + p14 + p25 + p27 + p29 + p33 + p35 + p48 + p49 + p53 + p54
"
spurningEstimatesMLR = cfa(model = spurningSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
fitResultsMLR = data.frame(Model = "Spurning", rbind(inspect(object = spurningEstimatesMLR, what = "fit")), stringsAsFactors = FALSE)
spurningEstimatesWLSMV = cfa(model = spurningSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
ordered = c("p06", "p10", "p14", "p25", "p27", "p29", "p33", "p35", "p48", "p49", "p53", "p54"),
parameterization = "theta")
fitResultsWLSMV = data.frame(Model = "Spurning", rbind(inspect(object = spurningEstimatesWLSMV, what = "fit")), stringsAsFactors = FALSE)
spurningParams = cbind(inspect(object = spurningEstimatesMLR, what = "std")$lambda, inspect(object = spurningEstimatesWLSMV, what = "std")$lambda)
colnames(spurningParams) = c("spurningMLR", "spurningWLSMV")
terrorizingSyntax = "
terror =~ p07 + p11 + p13 + p17 + p24 + p26 + p36 + p55 + p56
"
terrorizingEstimatesMLR = cfa(model = terrorizingSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
fitResultsMLR = rbind(fitResultsMLR, c("Terrorizing", inspect(object = terrorizingEstimatesMLR, what = "fit")))
terrorizingEstimatesWLSMV = cfa(model = terrorizingSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
ordered = c("p07", "p11", "p13", "p17", "p24", "p26", "p36", "p55", "p56"), parameterization = "theta")
fitResultsWLSMV = rbind(fitResultsWLSMV, c("Terrorizing", inspect(object = terrorizingEstimatesWLSMV, what = "fit")))
terrorizingParams = cbind(inspect(object = terrorizingEstimatesMLR, what = "std")$lambda, inspect(object = terrorizingEstimatesWLSMV, what = "std")$lambda)
colnames(terrorizingParams) = c("terrorizingMLR", "terrorizingWLSMV")
isolatingSyntax = "
isolate =~ p01 + p18 + p19 + p23 + p39 + p43
"
isolatingEstimatesMLR = cfa(model = isolatingSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
fitResultsMLR = rbind(fitResultsMLR, c("Isolating", inspect(object = isolatingEstimatesMLR, what = "fit")))
isolatingEstimatesWLSMV = cfa(model = isolatingSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
ordered = c("p01", "p18", "p19", "p23", "p39", "p43"), parameterization = "theta")
fitResultsWLSMV = rbind(fitResultsWLSMV, c("Isolating", inspect(object = isolatingEstimatesWLSMV, what = "fit")))
isolatingParams = cbind(inspect(object = isolatingEstimatesMLR, what = "std")$lambda, inspect(object = isolatingEstimatesWLSMV, what = "std")$lambda)
colnames(isolatingParams) = c("isolatingMLR", "isolatingWLSMV")
corruptingSyntax = "
corrupt =~ p09 + p12 + p16 + p20 + p28 + p47 + p50
"
corruptingEstimatesMLR = cfa(model = corruptingSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
fitResultsMLR = rbind(fitResultsMLR, c("Corrupting", inspect(object = corruptingEstimatesMLR, what = "fit")))
corruptingEstimatesWLSMV = cfa(model = corruptingSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
ordered = c("p09", "p12", "p16", "p20", "p28", "p47", "p50"), parameterization = "theta")
fitResultsWLSMV = rbind(fitResultsWLSMV, c("Corrupting", inspect(object = corruptingEstimatesWLSMV, what = "fit")))
corruptingParams = cbind(inspect(object = corruptingEstimatesMLR, what = "std")$lambda, inspect(object = corruptingEstimatesWLSMV, what = "std")$lambda)
colnames(corruptingParams) = c("corruptingMLR", "corruptingWLSMV")
ignoringSyntax = "
ignore =~ p02 + p03 + p04 + p21 + p22 + p30 + p31 + p37 + p40 + p44 + p45 + p46 + p51 + p52 + p57
"
ignoringEstimatesMLR = cfa(model = ignoringSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
fitResultsMLR = rbind(fitResultsMLR, c("Ignoring", inspect(object = ignoringEstimatesMLR, what = "fit")))
ignoringEstimatesWLSMV = cfa(model = ignoringSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
ordered = c("p02", "p03", "p04", "p21", "p22", "p30", "p31", "p37", "p40", "p44", "p45", "p46", "p51", "p52", "p57"),
parameterization = "theta")
fitResultsWLSMV = rbind(fitResultsWLSMV, c("Ignoring", inspect(object = ignoringEstimatesWLSMV, what = "fit")))
ignoringParams = cbind(inspect(object = ignoringEstimatesMLR, what = "std")$lambda, inspect(object = ignoringEstimatesWLSMV, what = "std")$lambda)
colnames(ignoringParams) = c("ignoringMLR", "ignoringWLSMV")
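The five blocks above repeat the same steps. Purely as an optional refactor (a sketch, not used for the output below), a small helper could fit both versions of each one-factor model:
fitOneFactor = function(factorName, items, data) {
  # build the one-factor syntax from the item names, then fit with MLR and WLSMV
  syntax = paste0(factorName, " =~ ", paste(items, collapse = " + "))
  mlr = cfa(model = syntax, data = data, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
  wlsmv = cfa(model = syntax, data = data, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
              ordered = items, parameterization = "theta")
  list(MLR = mlr, WLSMV = wlsmv)
}
# Example: spurnFits = fitOneFactor("spurn", c("p06", "p10", "p14", "p25", "p27", "p29",
#                                              "p33", "p35", "p48", "p49", "p53", "p54"), abuseData)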
fitResultsMLR[,c("Model", "chisq.scaled", "chisq.scaling.factor", "df.scaled", "pvalue.scaled", "cfi.scaled", "tli.scaled","rmsea.scaled")]
## Model chisq.scaled chisq.scaling.factor df.scaled
## 1 Spurning 226.152905765186 1.4037561252742 54
## 2 Terrorizing 189.803721116701 1.58656762802196 27
## 3 Isolating 80.259847624197 1.4932507532388 9
## 4 Corrupting 55.0538883570252 1.90789623870983 14
## 5 Ignoring 486.908843220687 1.79764025612883 90
## pvalue.scaled cfi.scaled tli.scaled rmsea.scaled
## 1 0 0.958371099752682 0.949120233031056 0.0488674487326684
## 2 0 0.918215012424422 0.89095334989923 0.0672062477520051
## 3 1.43551837084033e-13 0.916479746946289 0.860799578243815 0.077012382131692
## 4 8.47536313464126e-07 0.933846546194285 0.900769819291427 0.0468675765911613
## 5 0 0.931548188466559 0.920139553210985 0.0574755981962841
fitResultsWLSMV[,c("Model", "chisq.scaled", "chisq.scaling.factor", "df.scaled", "pvalue.scaled", "cfi.scaled", "tli.scaled","rmsea.scaled")]
## Model chisq.scaled chisq.scaling.factor df.scaled
## 1 Spurning 295.045952463798 0.496749579834486 54
## 2 Terrorizing 263.19997326362 0.500469180688551 27
## 3 Isolating 129.654419809325 0.543624947898763 9
## 4 Corrupting 87.8190972192855 0.475150597808876 14
## 5 Ignoring 897.971800277154 0.391492589079186 90
## pvalue.scaled cfi.scaled tli.scaled rmsea.scaled
## 1 0 0.98339841593589 0.979709175032755 0.0578462389201534
## 2 0 0.96554644739071 0.95406192985428 0.0809804387040941
## 3 0 0.962025643940732 0.936709406567887 0.10024724549331
## 4 9.78217506997225e-13 0.976165199351843 0.964247799027764 0.0628698511593426
## 5 0 0.9759983142346 0.9719980332737 0.082034952726923
spurningParams
## spurningMLR spurningWLSMV
## p06 0.5992433 0.6592699
## p10 0.4564849 0.5278419
## p14 0.7688568 0.8366025
## p25 0.5259160 0.5961952
## p27 0.6067552 0.6766139
## p29 0.8159279 0.8649677
## p33 0.8350497 0.9067893
## p35 0.4652622 0.5375738
## p48 0.5160559 0.7272325
## p49 0.6552668 0.7439243
## p53 0.6793626 0.7605317
## p54 0.6098414 0.6799810
terrorizingParams
## terrorizingMLR terrorizingWLSMV
## p07 0.5117382 0.6168137
## p11 0.6733019 0.7708374
## p13 0.4510228 0.7125691
## p17 0.6114623 0.7212506
## p24 0.5706406 0.7869150
## p26 0.5537510 0.6172249
## p36 0.6847511 0.8045284
## p55 0.6430094 0.7424127
## p56 0.7318947 0.8150255
isolatingParams
## isolatingMLR isolatingWLSMV
## p01 0.5212017 0.6953674
## p18 0.5498697 0.6289841
## p19 0.5444853 0.6847115
## p23 0.5399954 0.6283148
## p39 0.5630337 0.7260244
## p43 0.7522344 0.8217907
corruptingParams
## corruptingMLR corruptingWLSMV
## p09 0.5893723 0.7389980
## p12 0.5447413 0.7134776
## p16 0.3763251 0.5245557
## p20 0.5448756 0.8542732
## p28 0.6314027 0.8259521
## p47 0.5799805 0.7077427
## p50 0.6459726 0.8399461
ignoringParams
## ignoringMLR ignoringWLSMV
## p02 0.6719748 0.8127687
## p03 0.6541416 0.7491697
## p04 0.6569606 0.7487570
## p21 0.7241192 0.8010096
## p22 0.4450319 0.5401476
## p30 0.7450196 0.8332850
## p31 0.8464995 0.9130228
## p37 0.7133710 0.8129383
## p40 0.8075920 0.8909627
## p44 0.7565607 0.8494004
## p45 0.6559023 0.7952916
## p46 0.8297153 0.9044044
## p51 0.7113396 0.8056177
## p52 0.7393311 0.8149049
## p57 0.8249563 0.9176912
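For reporting, the five two-column loading comparisons can be stacked into one table (an optional sketch using the objects built above):
loadingList = list(spurningParams, terrorizingParams, isolatingParams, corruptingParams, ignoringParams)
allLoadings = do.call(rbind, lapply(loadingList, function(x) { colnames(x) = c("MLR", "WLSMV"); x }))
round(allLoadings, 3)  # standardized loadings for all 49 items, MLR vs. WLSMV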
cfaHigherSyntax = "
spurn =~ p06 + p10 + p14 + p25 + p27 + p29 + p33 + p35 + p48 + p49 + p53 + p54
terror =~ p07 + p11 + p13 + p17 + p24 + p26 + p36 + p55 + p56
isolate =~ p01 + p18 + p19 + p23 + p39 + p43
corrupt =~ p09 + p12 + p16 + p20 + p28 + p47 + p50
ignore =~ p02 + p03 + p04 + p21 + p22 + p30 + p31 + p37 + p40 + p44 + p45 + p46 + p51 + p52 + p57
abuse =~ spurn + terror + isolate + corrupt + ignore
"
cfaHigherEstimates = cfa(model = cfaHigherSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
summary(cfaHigherEstimates, fit.measures = TRUE, rsquare = TRUE, standardized = TRUE)
## lavaan 0.6-19 ended normally after 75 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 152
##
## Number of observations 1335
## Number of missing patterns 1
##
## Model Test User Model:
## Standard Scaled
## Test Statistic 6597.050 4489.494
## Degrees of freedom 1122 1122
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.469
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 35067.550 22808.622
## Degrees of freedom 1176 1176
## P-value 0.000 0.000
## Scaling correction factor 1.537
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.838 0.844
## Tucker-Lewis Index (TLI) 0.831 0.837
##
## Robust Comparative Fit Index (CFI) 0.852
## Robust Tucker-Lewis Index (TLI) 0.845
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -69010.792 -69010.792
## Scaling correction factor 2.505
## for the MLR correction
## Loglikelihood unrestricted model (H1) -65712.267 -65712.267
## Scaling correction factor 1.593
## for the MLR correction
##
## Akaike (AIC) 138325.584 138325.584
## Bayesian (BIC) 139115.480 139115.480
## Sample-size adjusted Bayesian (SABIC) 138632.643 138632.643
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.060 0.047
## 90 Percent confidence interval - lower 0.059 0.046
## 90 Percent confidence interval - upper 0.062 0.049
## P-value H_0: RMSEA <= 0.050 0.000 1.000
## P-value H_0: RMSEA >= 0.080 0.000 0.000
##
## Robust RMSEA 0.057
## 90 Percent confidence interval - lower 0.056
## 90 Percent confidence interval - upper 0.059
## P-value H_0: Robust RMSEA <= 0.050 0.000
## P-value H_0: Robust RMSEA >= 0.080 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.058 0.058
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## spurn =~
## p06 1.000 0.697 0.579
## p10 0.792 0.062 12.710 0.000 0.552 0.445
## p14 1.065 0.063 17.020 0.000 0.742 0.763
## p25 0.896 0.061 14.750 0.000 0.624 0.524
## p27 1.015 0.059 17.275 0.000 0.707 0.594
## p29 1.319 0.069 19.076 0.000 0.919 0.796
## p33 1.141 0.064 17.881 0.000 0.795 0.824
## p35 0.747 0.063 11.849 0.000 0.520 0.512
## p48 0.545 0.060 9.101 0.000 0.380 0.568
## p49 0.927 0.061 15.296 0.000 0.646 0.664
## p53 1.041 0.063 16.504 0.000 0.725 0.681
## p54 1.098 0.069 15.909 0.000 0.765 0.628
## terror =~
## p07 1.000 0.483 0.534
## p11 1.341 0.097 13.871 0.000 0.648 0.673
## p13 0.622 0.065 9.628 0.000 0.301 0.451
## p17 1.070 0.088 12.209 0.000 0.517 0.600
## p24 0.610 0.058 10.589 0.000 0.295 0.576
## p26 1.247 0.111 11.190 0.000 0.603 0.602
## p36 1.228 0.098 12.497 0.000 0.594 0.673
## p55 1.589 0.130 12.226 0.000 0.768 0.633
## p56 1.793 0.134 13.405 0.000 0.866 0.706
## isolate =~
## p01 1.000 0.358 0.491
## p18 2.139 0.219 9.778 0.000 0.766 0.611
## p19 1.209 0.117 10.344 0.000 0.433 0.606
## p23 1.685 0.168 10.003 0.000 0.603 0.591
## p39 0.903 0.088 10.281 0.000 0.323 0.488
## p43 1.557 0.134 11.633 0.000 0.558 0.672
## corrupt =~
## p09 1.000 0.360 0.602
## p12 0.961 0.103 9.367 0.000 0.346 0.541
## p16 1.014 0.116 8.772 0.000 0.365 0.368
## p20 0.645 0.080 8.086 0.000 0.232 0.497
## p28 1.177 0.097 12.150 0.000 0.424 0.624
## p47 1.347 0.112 12.030 0.000 0.485 0.614
## p50 1.041 0.074 14.038 0.000 0.375 0.649
## ignore =~
## p02 1.000 0.461 0.681
## p03 1.318 0.082 16.103 0.000 0.607 0.653
## p04 1.139 0.072 15.742 0.000 0.525 0.651
## p21 1.317 0.093 14.221 0.000 0.607 0.717
## p22 1.046 0.081 12.923 0.000 0.482 0.474
## p30 1.504 0.090 16.643 0.000 0.693 0.743
## p31 1.437 0.082 17.503 0.000 0.662 0.841
## p37 1.161 0.078 14.958 0.000 0.535 0.708
## p40 1.431 0.081 17.590 0.000 0.659 0.807
## p44 1.302 0.079 16.483 0.000 0.600 0.764
## p45 0.915 0.053 17.137 0.000 0.422 0.670
## p46 1.439 0.083 17.406 0.000 0.663 0.822
## p51 1.484 0.099 14.976 0.000 0.684 0.700
## p52 1.673 0.106 15.728 0.000 0.771 0.753
## p57 1.302 0.072 18.106 0.000 0.600 0.823
## abuse =~
## spurn 1.000 0.971 0.971
## terror 0.680 0.064 10.571 0.000 0.952 0.952
## isolate 0.494 0.056 8.762 0.000 0.934 0.934
## corrupt 0.397 0.049 8.188 0.000 0.745 0.745
## ignore 0.577 0.054 10.585 0.000 0.846 0.846
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .p06 2.520 0.033 76.549 0.000 2.520 2.095
## .p10 2.208 0.034 65.045 0.000 2.208 1.780
## .p14 1.600 0.027 60.165 0.000 1.600 1.647
## .p25 2.029 0.033 62.184 0.000 2.029 1.702
## .p27 2.229 0.033 68.385 0.000 2.229 1.872
## .p29 1.898 0.032 60.059 0.000 1.898 1.644
## .p33 1.601 0.026 60.633 0.000 1.601 1.659
## .p35 1.776 0.028 63.917 0.000 1.776 1.749
## .p48 1.236 0.018 67.548 0.000 1.236 1.849
## .p49 1.649 0.027 61.987 0.000 1.649 1.697
## .p53 1.844 0.029 63.324 0.000 1.844 1.733
## .p54 1.934 0.033 58.053 0.000 1.934 1.589
## .p07 1.622 0.025 65.517 0.000 1.622 1.793
## .p11 1.586 0.026 60.218 0.000 1.586 1.648
## .p13 1.213 0.018 66.573 0.000 1.213 1.822
## .p17 1.493 0.024 63.352 0.000 1.493 1.734
## .p24 1.196 0.014 85.313 0.000 1.196 2.335
## .p26 2.026 0.027 73.948 0.000 2.026 2.024
## .p36 1.459 0.024 60.477 0.000 1.459 1.655
## .p55 1.837 0.033 55.295 0.000 1.837 1.513
## .p56 1.923 0.034 57.270 0.000 1.923 1.567
## .p01 1.303 0.020 65.295 0.000 1.303 1.787
## .p18 2.318 0.034 67.527 0.000 2.318 1.848
## .p19 1.288 0.020 65.846 0.000 1.288 1.802
## .p23 2.022 0.028 72.385 0.000 2.022 1.981
## .p39 1.311 0.018 72.292 0.000 1.311 1.979
## .p43 1.656 0.023 72.927 0.000 1.656 1.996
## .p09 1.246 0.016 76.116 0.000 1.246 2.083
## .p12 1.338 0.018 76.389 0.000 1.338 2.091
## .p16 1.692 0.027 62.260 0.000 1.692 1.704
## .p20 1.109 0.013 86.722 0.000 1.109 2.373
## .p28 1.205 0.019 64.736 0.000 1.205 1.772
## .p47 1.370 0.022 63.288 0.000 1.370 1.732
## .p50 1.184 0.016 74.812 0.000 1.184 2.048
## .p02 1.298 0.019 70.090 0.000 1.298 1.918
## .p03 1.630 0.025 64.022 0.000 1.630 1.752
## .p04 1.573 0.022 71.253 0.000 1.573 1.950
## .p21 1.562 0.023 67.411 0.000 1.562 1.845
## .p22 1.831 0.028 65.796 0.000 1.831 1.801
## .p30 1.706 0.026 66.859 0.000 1.706 1.830
## .p31 1.514 0.022 70.256 0.000 1.514 1.923
## .p37 1.479 0.021 71.457 0.000 1.479 1.956
## .p40 1.467 0.022 65.622 0.000 1.467 1.796
## .p44 1.599 0.022 74.349 0.000 1.599 2.035
## .p45 1.282 0.017 74.467 0.000 1.282 2.038
## .p46 1.502 0.022 68.064 0.000 1.502 1.863
## .p51 1.619 0.027 60.522 0.000 1.619 1.656
## .p52 1.804 0.028 64.384 0.000 1.804 1.762
## .p57 1.378 0.020 69.055 0.000 1.378 1.890
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .p06 0.961 0.049 19.570 0.000 0.961 0.665
## .p10 1.234 0.048 25.501 0.000 1.234 0.802
## .p14 0.394 0.025 15.580 0.000 0.394 0.417
## .p25 1.032 0.042 24.741 0.000 1.032 0.726
## .p27 0.919 0.040 22.915 0.000 0.919 0.648
## .p29 0.489 0.027 17.888 0.000 0.489 0.367
## .p33 0.298 0.021 14.357 0.000 0.298 0.321
## .p35 0.760 0.045 17.037 0.000 0.760 0.738
## .p48 0.303 0.031 9.754 0.000 0.303 0.677
## .p49 0.528 0.032 16.668 0.000 0.528 0.559
## .p53 0.607 0.039 15.389 0.000 0.607 0.536
## .p54 0.897 0.043 21.030 0.000 0.897 0.605
## .p07 0.584 0.037 15.883 0.000 0.584 0.715
## .p11 0.506 0.034 15.099 0.000 0.506 0.547
## .p13 0.353 0.041 8.653 0.000 0.353 0.796
## .p17 0.474 0.032 14.885 0.000 0.474 0.640
## .p24 0.175 0.018 9.605 0.000 0.175 0.669
## .p26 0.639 0.043 14.816 0.000 0.639 0.638
## .p36 0.425 0.031 13.811 0.000 0.425 0.547
## .p55 0.883 0.049 18.120 0.000 0.883 0.600
## .p56 0.754 0.044 17.223 0.000 0.754 0.501
## .p01 0.404 0.043 9.317 0.000 0.404 0.759
## .p18 0.986 0.045 21.986 0.000 0.986 0.627
## .p19 0.323 0.029 11.130 0.000 0.323 0.633
## .p23 0.678 0.033 20.665 0.000 0.678 0.651
## .p39 0.334 0.035 9.588 0.000 0.334 0.762
## .p43 0.378 0.026 14.324 0.000 0.378 0.548
## .p09 0.228 0.027 8.392 0.000 0.228 0.637
## .p12 0.289 0.030 9.810 0.000 0.289 0.707
## .p16 0.853 0.047 18.246 0.000 0.853 0.865
## .p20 0.164 0.030 5.404 0.000 0.164 0.753
## .p28 0.283 0.036 7.880 0.000 0.283 0.611
## .p47 0.390 0.036 10.858 0.000 0.390 0.623
## .p50 0.193 0.030 6.448 0.000 0.193 0.579
## .p02 0.246 0.028 8.724 0.000 0.246 0.536
## .p03 0.497 0.036 13.665 0.000 0.497 0.574
## .p04 0.375 0.032 11.877 0.000 0.375 0.576
## .p21 0.348 0.025 14.074 0.000 0.348 0.486
## .p22 0.801 0.039 20.556 0.000 0.801 0.775
## .p30 0.389 0.036 10.728 0.000 0.389 0.448
## .p31 0.181 0.019 9.457 0.000 0.181 0.292
## .p37 0.285 0.027 10.566 0.000 0.285 0.499
## .p40 0.232 0.030 7.729 0.000 0.232 0.348
## .p44 0.258 0.021 12.304 0.000 0.258 0.417
## .p45 0.218 0.020 11.032 0.000 0.218 0.551
## .p46 0.210 0.026 8.198 0.000 0.210 0.324
## .p51 0.487 0.036 13.473 0.000 0.487 0.510
## .p52 0.454 0.028 16.305 0.000 0.454 0.433
## .p57 0.172 0.021 8.308 0.000 0.172 0.323
## .spurn 0.028 0.009 2.984 0.003 0.058 0.058
## .terror 0.022 0.005 4.189 0.000 0.093 0.093
## .isolate 0.016 0.005 3.447 0.001 0.129 0.129
## .corrupt 0.058 0.010 5.777 0.000 0.445 0.445
## .ignore 0.060 0.008 7.512 0.000 0.284 0.284
## abuse 0.457 0.047 9.730 0.000 1.000 1.000
##
## R-Square:
## Estimate
## p06 0.335
## p10 0.198
## p14 0.583
## p25 0.274
## p27 0.352
## p29 0.633
## p33 0.679
## p35 0.262
## p48 0.323
## p49 0.441
## p53 0.464
## p54 0.395
## p07 0.285
## p11 0.453
## p13 0.204
## p17 0.360
## p24 0.331
## p26 0.362
## p36 0.453
## p55 0.400
## p56 0.499
## p01 0.241
## p18 0.373
## p19 0.367
## p23 0.349
## p39 0.238
## p43 0.452
## p09 0.363
## p12 0.293
## p16 0.135
## p20 0.247
## p28 0.389
## p47 0.377
## p50 0.421
## p02 0.464
## p03 0.426
## p04 0.424
## p21 0.514
## p22 0.225
## p30 0.552
## p31 0.708
## p37 0.501
## p40 0.652
## p44 0.583
## p45 0.449
## p46 0.676
## p51 0.490
## p52 0.567
## p57 0.677
## spurn 0.942
## terror 0.907
## isolate 0.871
## corrupt 0.555
## ignore 0.716
NOTE: With respect to fit of the structural model, we are now fitting a single higher-order factor INSTEAD OF covariances among the 5 factors.
To test its fit against the structurally saturated model (all possible factor covariances estimated, as in the previous section), we can do a −2ΔLL scaled difference test.
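The comparison model, cfaNoHighEstimates, is the five-correlated-factors model from the previous section; as a reminder, a minimal sketch of that specification is below (lavaan estimates the factor covariances by default):
cfaNoHighSyntax = "
spurn =~ p06 + p10 + p14 + p25 + p27 + p29 + p33 + p35 + p48 + p49 + p53 + p54
terror =~ p07 + p11 + p13 + p17 + p24 + p26 + p36 + p55 + p56
isolate =~ p01 + p18 + p19 + p23 + p39 + p43
corrupt =~ p09 + p12 + p16 + p20 + p28 + p47 + p50
ignore =~ p02 + p03 + p04 + p21 + p22 + p30 + p31 + p37 + p40 + p44 + p45 + p46 + p51 + p52 + p57
"
cfaNoHighEstimates = cfa(model = cfaNoHighSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")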
anova(cfaNoHighEstimates, cfaHigherEstimates)
##
## Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")
##
## lavaan->lavTestLRT():
## lavaan NOTE: The "Chisq" column contains standard test statistics, not the
## robust test that should be reported per model. A robust difference test is
## a function of two standard (not robust) statistics.
## Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
## cfaNoHighEstimates 1117 138229 139045 6490.3
## cfaHigherEstimates 1122 138326 139115 6597.1 47.083 5 5.465e-09 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
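The "Chisq diff" value is the Satorra-Bentler (2001) scaled difference that anova()/lavTestLRT() computes; a sketch of that computation from the two fitted objects (cfaNoHighEstimates as sketched above):
T0 = fitMeasures(cfaHigherEstimates, "chisq")   # standard (not robust) chi-square, more restricted model
d0 = fitMeasures(cfaHigherEstimates, "df")
c0 = fitMeasures(cfaHigherEstimates, "chisq.scaling.factor")
T1 = fitMeasures(cfaNoHighEstimates, "chisq")   # standard chi-square, less restricted model
d1 = fitMeasures(cfaNoHighEstimates, "df")
c1 = fitMeasures(cfaNoHighEstimates, "chisq.scaling.factor")
cd = (d0 * c0 - d1 * c1) / (d0 - d1)   # scaling correction for the difference test
TRd = (T0 - T1) / cd                   # scaled difference statistic on d0 - d1 df
pchisq(TRd, df = d0 - d1, lower.tail = FALSE)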
This higher-order factor model uses 5 fewer parameters: the 10 covariances among the factors are replaced by 5 higher-order parameters (4 free higher-order loadings plus the higher-order factor variance), while the 5 factor variances become 5 factor disturbance variances.
According to the −2ΔLL scaled difference test relative to the previous model,
−2ΔLL(5) = 47.083, p < .0001,
so trying to reproduce the 5 factor covariances with a single higher-order factor results in a significant decrease in fit. Based on the factor correlations we examined earlier and the standardized higher-order loadings, I’d guess the issue lies with the “corrupting” factor not being as related to the others.
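To see which factor is driving the misfit, the estimated factor correlations can be compared with the correlations implied by the higher-order structure (a sketch):
lavInspect(cfaNoHighEstimates, what = "cor.lv")  # factor correlations from the correlated-factors model
lavInspect(cfaHigherEstimates, what = "cor.lv")  # correlations implied by the higher-order factor (includes "abuse")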
For the sake of illustration, we can try one more alternative: what if the items measured a single factor (i.e., supported a single score)? Below is the syntax for the CFA model with MLR specifying a single factor instead of a higher-order factor (the “smallest model” for comparison):
cfaSingleSyntax = "
abuse =~ p06 + p10 + p14 + p25 + p27 + p29 + p33 + p35 + p48 + p49 + p53 + p54 +
p07 + p11 + p13 + p17 + p24 + p26 + p36 + p55 + p56 + p01 + p18 + p19 +
p23 + p39 + p43 + p09 + p12 + p16 + p20 + p28 + p47 + p50 + p02 + p03 +
p04 + p21 + p22 + p30 + p31 + p37 + p40 + p44 + p45 + p46 + p51 + p52 + p57
"
cfaSingleEstimates = cfa(model = cfaSingleSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "MLR")
summary(cfaSingleEstimates, fit.measures = TRUE, rsquare = TRUE, standardized = TRUE)
## lavaan 0.6-19 ended normally after 47 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 147
##
## Number of observations 1335
## Number of missing patterns 1
##
## Model Test User Model:
## Standard Scaled
## Test Statistic 9209.963 6186.391
## Degrees of freedom 1127 1127
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.489
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 35067.550 22808.622
## Degrees of freedom 1176 1176
## P-value 0.000 0.000
## Scaling correction factor 1.537
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.762 0.766
## Tucker-Lewis Index (TLI) 0.751 0.756
##
## Robust Comparative Fit Index (CFI) 0.774
## Robust Tucker-Lewis Index (TLI) 0.764
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -70317.248 -70317.248
## Scaling correction factor 2.392
## for the MLR correction
## Loglikelihood unrestricted model (H1) -65712.267 -65712.267
## Scaling correction factor 1.593
## for the MLR correction
##
## Akaike (AIC) 140928.496 140928.496
## Bayesian (BIC) 141692.409 141692.409
## Sample-size adjusted Bayesian (SABIC) 141225.455 141225.455
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.073 0.058
## 90 Percent confidence interval - lower 0.072 0.057
## 90 Percent confidence interval - upper 0.075 0.059
## P-value H_0: RMSEA <= 0.050 0.000 0.000
## P-value H_0: RMSEA >= 0.080 0.000 0.000
##
## Robust RMSEA 0.071
## 90 Percent confidence interval - lower 0.069
## 90 Percent confidence interval - upper 0.073
## P-value H_0: Robust RMSEA <= 0.050 0.000
## P-value H_0: Robust RMSEA >= 0.080 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.062 0.062
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## abuse =~
## p06 1.000 0.640 0.532
## p10 0.784 0.066 11.916 0.000 0.502 0.405
## p14 1.084 0.068 15.992 0.000 0.694 0.714
## p25 0.898 0.064 14.001 0.000 0.575 0.482
## p27 1.026 0.062 16.490 0.000 0.657 0.551
## p29 1.333 0.075 17.690 0.000 0.854 0.739
## p33 1.183 0.071 16.750 0.000 0.757 0.785
## p35 0.939 0.077 12.203 0.000 0.601 0.592
## p48 0.598 0.066 8.990 0.000 0.383 0.573
## p49 0.949 0.064 14.722 0.000 0.608 0.625
## p53 1.089 0.068 16.023 0.000 0.697 0.655
## p54 1.099 0.074 14.764 0.000 0.704 0.578
## p07 0.748 0.067 11.082 0.000 0.479 0.529
## p11 0.934 0.075 12.466 0.000 0.598 0.622
## p13 0.431 0.061 7.022 0.000 0.276 0.414
## p17 0.719 0.067 10.725 0.000 0.460 0.535
## p24 0.427 0.053 8.106 0.000 0.273 0.534
## p26 0.915 0.060 15.249 0.000 0.586 0.585
## p36 0.828 0.067 12.339 0.000 0.530 0.602
## p55 1.067 0.069 15.406 0.000 0.683 0.563
## p56 1.183 0.075 15.714 0.000 0.758 0.618
## p01 0.526 0.060 8.737 0.000 0.337 0.462
## p18 1.066 0.064 16.776 0.000 0.683 0.544
## p19 0.639 0.066 9.710 0.000 0.409 0.573
## p23 0.844 0.061 13.757 0.000 0.540 0.529
## p39 0.473 0.055 8.581 0.000 0.303 0.457
## p43 0.812 0.061 13.257 0.000 0.520 0.627
## p09 0.430 0.051 8.357 0.000 0.275 0.460
## p12 0.421 0.052 8.132 0.000 0.270 0.422
## p16 0.389 0.057 6.819 0.000 0.249 0.251
## p20 0.216 0.041 5.257 0.000 0.138 0.295
## p28 0.473 0.068 6.991 0.000 0.303 0.445
## p47 0.624 0.070 8.948 0.000 0.399 0.505
## p50 0.438 0.062 7.082 0.000 0.280 0.485
## p02 0.721 0.069 10.525 0.000 0.462 0.682
## p03 0.899 0.069 13.001 0.000 0.576 0.619
## p04 0.754 0.062 12.191 0.000 0.483 0.598
## p21 0.874 0.074 11.734 0.000 0.559 0.661
## p22 0.882 0.057 15.441 0.000 0.565 0.556
## p30 1.017 0.070 14.547 0.000 0.651 0.698
## p31 0.960 0.069 13.984 0.000 0.615 0.781
## p37 0.775 0.066 11.728 0.000 0.496 0.657
## p40 0.975 0.071 13.805 0.000 0.624 0.765
## p44 0.916 0.064 14.284 0.000 0.587 0.746
## p45 0.679 0.063 10.774 0.000 0.435 0.691
## p46 0.953 0.072 13.227 0.000 0.610 0.757
## p51 0.955 0.073 13.161 0.000 0.612 0.626
## p52 1.212 0.068 17.942 0.000 0.776 0.758
## p57 0.878 0.068 12.944 0.000 0.562 0.771
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .p06 2.520 0.033 76.549 0.000 2.520 2.095
## .p10 2.208 0.034 65.045 0.000 2.208 1.780
## .p14 1.600 0.027 60.165 0.000 1.600 1.647
## .p25 2.029 0.033 62.184 0.000 2.029 1.702
## .p27 2.229 0.033 68.385 0.000 2.229 1.872
## .p29 1.898 0.032 60.059 0.000 1.898 1.644
## .p33 1.601 0.026 60.633 0.000 1.601 1.659
## .p35 1.776 0.028 63.917 0.000 1.776 1.749
## .p48 1.236 0.018 67.548 0.000 1.236 1.849
## .p49 1.649 0.027 61.987 0.000 1.649 1.697
## .p53 1.844 0.029 63.324 0.000 1.844 1.733
## .p54 1.934 0.033 58.053 0.000 1.934 1.589
## .p07 1.622 0.025 65.517 0.000 1.622 1.793
## .p11 1.586 0.026 60.218 0.000 1.586 1.648
## .p13 1.213 0.018 66.573 0.000 1.213 1.822
## .p17 1.493 0.024 63.352 0.000 1.493 1.734
## .p24 1.196 0.014 85.313 0.000 1.196 2.335
## .p26 2.026 0.027 73.948 0.000 2.026 2.024
## .p36 1.459 0.024 60.477 0.000 1.459 1.655
## .p55 1.837 0.033 55.295 0.000 1.837 1.513
## .p56 1.923 0.034 57.270 0.000 1.923 1.567
## .p01 1.303 0.020 65.295 0.000 1.303 1.787
## .p18 2.318 0.034 67.527 0.000 2.318 1.848
## .p19 1.288 0.020 65.846 0.000 1.288 1.802
## .p23 2.022 0.028 72.385 0.000 2.022 1.981
## .p39 1.311 0.018 72.292 0.000 1.311 1.979
## .p43 1.656 0.023 72.927 0.000 1.656 1.996
## .p09 1.246 0.016 76.116 0.000 1.246 2.083
## .p12 1.338 0.018 76.389 0.000 1.338 2.091
## .p16 1.692 0.027 62.260 0.000 1.692 1.704
## .p20 1.109 0.013 86.722 0.000 1.109 2.373
## .p28 1.205 0.019 64.736 0.000 1.205 1.772
## .p47 1.370 0.022 63.288 0.000 1.370 1.732
## .p50 1.184 0.016 74.812 0.000 1.184 2.048
## .p02 1.298 0.019 70.090 0.000 1.298 1.918
## .p03 1.630 0.025 64.022 0.000 1.630 1.752
## .p04 1.573 0.022 71.253 0.000 1.573 1.950
## .p21 1.562 0.023 67.411 0.000 1.562 1.845
## .p22 1.831 0.028 65.796 0.000 1.831 1.801
## .p30 1.706 0.026 66.859 0.000 1.706 1.830
## .p31 1.514 0.022 70.256 0.000 1.514 1.923
## .p37 1.479 0.021 71.457 0.000 1.479 1.956
## .p40 1.467 0.022 65.622 0.000 1.467 1.796
## .p44 1.599 0.022 74.349 0.000 1.599 2.035
## .p45 1.282 0.017 74.467 0.000 1.282 2.038
## .p46 1.502 0.022 68.064 0.000 1.502 1.863
## .p51 1.619 0.027 60.522 0.000 1.619 1.656
## .p52 1.804 0.028 64.384 0.000 1.804 1.762
## .p57 1.378 0.020 69.055 0.000 1.378 1.890
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .p06 1.037 0.048 21.813 0.000 1.037 0.717
## .p10 1.287 0.048 26.819 0.000 1.287 0.836
## .p14 0.462 0.029 16.035 0.000 0.462 0.490
## .p25 1.091 0.042 25.785 0.000 1.091 0.767
## .p27 0.987 0.041 24.269 0.000 0.987 0.696
## .p29 0.605 0.033 18.229 0.000 0.605 0.454
## .p33 0.357 0.023 15.700 0.000 0.357 0.384
## .p35 0.669 0.041 16.194 0.000 0.669 0.649
## .p48 0.300 0.032 9.440 0.000 0.300 0.672
## .p49 0.576 0.033 17.198 0.000 0.576 0.609
## .p53 0.646 0.040 16.033 0.000 0.646 0.571
## .p54 0.986 0.045 21.967 0.000 0.986 0.666
## .p07 0.589 0.036 16.149 0.000 0.589 0.720
## .p11 0.568 0.036 15.739 0.000 0.568 0.614
## .p13 0.368 0.042 8.712 0.000 0.368 0.829
## .p17 0.529 0.034 15.407 0.000 0.529 0.714
## .p24 0.187 0.020 9.378 0.000 0.187 0.715
## .p26 0.659 0.041 16.117 0.000 0.659 0.658
## .p36 0.496 0.035 14.348 0.000 0.496 0.638
## .p55 1.007 0.051 19.864 0.000 1.007 0.683
## .p56 0.931 0.046 20.420 0.000 0.931 0.619
## .p01 0.419 0.044 9.581 0.000 0.419 0.787
## .p18 1.107 0.043 25.620 0.000 1.107 0.704
## .p19 0.343 0.030 11.613 0.000 0.343 0.672
## .p23 0.750 0.033 22.901 0.000 0.750 0.720
## .p39 0.347 0.035 9.948 0.000 0.347 0.791
## .p43 0.418 0.023 17.799 0.000 0.418 0.607
## .p09 0.282 0.028 9.966 0.000 0.282 0.788
## .p12 0.337 0.031 11.006 0.000 0.337 0.822
## .p16 0.924 0.046 19.893 0.000 0.924 0.937
## .p20 0.199 0.033 6.011 0.000 0.199 0.913
## .p28 0.371 0.039 9.444 0.000 0.371 0.802
## .p47 0.466 0.037 12.715 0.000 0.466 0.745
## .p50 0.255 0.033 7.825 0.000 0.255 0.765
## .p02 0.245 0.027 9.217 0.000 0.245 0.534
## .p03 0.534 0.038 14.053 0.000 0.534 0.617
## .p04 0.418 0.033 12.790 0.000 0.418 0.642
## .p21 0.404 0.028 14.351 0.000 0.404 0.563
## .p22 0.714 0.035 20.130 0.000 0.714 0.691
## .p30 0.446 0.039 11.520 0.000 0.446 0.512
## .p31 0.242 0.023 10.688 0.000 0.242 0.390
## .p37 0.325 0.030 10.910 0.000 0.325 0.569
## .p40 0.277 0.032 8.742 0.000 0.277 0.415
## .p44 0.274 0.021 12.945 0.000 0.274 0.443
## .p45 0.207 0.017 12.315 0.000 0.207 0.523
## .p46 0.277 0.029 9.723 0.000 0.277 0.427
## .p51 0.581 0.040 14.598 0.000 0.581 0.608
## .p52 0.447 0.027 16.499 0.000 0.447 0.426
## .p57 0.216 0.022 9.615 0.000 0.216 0.406
## abuse 0.410 0.045 9.048 0.000 1.000 1.000
##
## R-Square:
## Estimate
## p06 0.283
## p10 0.164
## p14 0.510
## p25 0.233
## p27 0.304
## p29 0.546
## p33 0.616
## p35 0.351
## p48 0.328
## p49 0.391
## p53 0.429
## p54 0.334
## p07 0.280
## p11 0.386
## p13 0.171
## p17 0.286
## p24 0.285
## p26 0.342
## p36 0.362
## p55 0.317
## p56 0.381
## p01 0.213
## p18 0.296
## p19 0.328
## p23 0.280
## p39 0.209
## p43 0.393
## p09 0.212
## p12 0.178
## p16 0.063
## p20 0.087
## p28 0.198
## p47 0.255
## p50 0.235
## p02 0.466
## p03 0.383
## p04 0.358
## p21 0.437
## p22 0.309
## p30 0.488
## p31 0.610
## p37 0.431
## p40 0.585
## p44 0.557
## p45 0.477
## p46 0.573
## p51 0.392
## p52 0.574
## p57 0.594
NOTE: With respect to fit of the structural model, we are now fitting a single factor INSTEAD OF 5 factors and a higher-order factor. This will tell us the extent to which a “total score” is appropriate.
anova(cfaSingleEstimates, cfaNoHighEstimates, cfaHigherEstimates)
##
## Scaled Chi-Squared Difference Test (method = "satorra.bentler.2001")
##
## lavaan->lavTestLRT():
## lavaan NOTE: The "Chisq" column contains standard test statistics, not the
## robust test that should be reported per model. A robust difference test is
## a function of two standard (not robust) statistics.
## Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
## cfaNoHighEstimates 1117 138229 139045 6490.3
## cfaHigherEstimates 1122 138326 139115 6597.1 47.08 5 5.465e-09 ***
## cfaSingleEstimates 1127 140928 141692 9210.0 448.91 5 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
According to the −2ΔLL scaled difference test relative to the previous model, −2ΔLL(5) = 448.91, p < .0001.
Therefore, a single factor fits significantly worse than 5 factors plus a higher-order factor, so one factor does not capture the covariances among these 49 items.
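Because all three MLR models are fit to the same 49 items, their information criteria can also be compared directly (a sketch; smaller values favor a model after accounting for its number of parameters):
sapply(list(FiveFactors = cfaNoHighEstimates, HigherOrder = cfaHigherEstimates,
            SingleFactor = cfaSingleEstimates),
       function(m) fitMeasures(m, c("aic", "bic")))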
We can try the same alternative using item factor analysis (IFA) with WLSMV: what if the items were measuring a single factor (i.e., a single score)?
ifaSingleSyntax = "
abuse =~ p06 + p10 + p14 + p25 + p27 + p29 + p33 + p35 + p48 + p49 + p53 + p54 +
p07 + p11 + p13 + p17 + p24 + p26 + p36 + p55 + p56 + p01 + p18 + p19 +
p23 + p39 + p43 + p09 + p12 + p16 + p20 + p28 + p47 + p50 + p02 + p03 +
p04 + p21 + p22 + p30 + p31 + p37 + p40 + p44 + p45 + p46 + p51 + p52 + p57
"
ifaSingleEstimates = cfa(model = ifaSingleSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus", estimator = "WLSMV",
ordered = c("p06", "p10", "p14", "p25", "p27", "p29", "p33", "p35", "p48", "p49", "p53", "p54",
"p07", "p11", "p13", "p17", "p24", "p26", "p36", "p55", "p56", "p01", "p18", "p19",
"p23", "p39", "p43", "p09", "p12", "p16", "p20", "p28", "p47", "p50", "p02", "p03",
"p04", "p21", "p22", "p30", "p31", "p37", "p40", "p44", "p45", "p46", "p51", "p52", "p57"))
summary(ifaSingleEstimates, fit.measures = TRUE, rsquare = TRUE, standardized = TRUE)
## lavaan 0.6-19 ended normally after 57 iterations
##
## Estimator DWLS
## Optimization method NLMINB
## Number of model parameters 245
##
## Number of observations 1335
##
## Model Test User Model:
## Standard Scaled
## Test Statistic 8446.686 7562.120
## Degrees of freedom 1127 1127
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.229
## Shift parameter 687.463
## simple second-order correction (WLSMV)
##
## Model Test Baseline Model:
##
## Test statistic 471778.214 67352.685
## Degrees of freedom 1176 1176
## P-value 0.000 0.000
## Scaling correction factor 7.111
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.984 0.903
## Tucker-Lewis Index (TLI) 0.984 0.899
##
## Robust Comparative Fit Index (CFI) 0.729
## Robust Tucker-Lewis Index (TLI) 0.717
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.070 0.065
## 90 Percent confidence interval - lower 0.068 0.064
## 90 Percent confidence interval - upper 0.071 0.067
## P-value H_0: RMSEA <= 0.050 0.000 0.000
## P-value H_0: RMSEA >= 0.080 0.000 0.000
##
## Robust RMSEA 0.104
## 90 Percent confidence interval - lower 0.102
## 90 Percent confidence interval - upper 0.106
## P-value H_0: Robust RMSEA <= 0.050 0.000
## P-value H_0: Robust RMSEA >= 0.080 1.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.073 0.073
##
## Parameter Estimates:
##
## Parameterization Delta
## Standard errors Robust.sem
## Information Expected
## Information saturated (h1) model Unstructured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## abuse =~
## p06 1.000 0.605 0.605
## p10 0.797 0.045 17.547 0.000 0.483 0.483
## p14 1.310 0.047 27.940 0.000 0.793 0.793
## p25 0.919 0.045 20.630 0.000 0.556 0.556
## p27 1.033 0.042 24.416 0.000 0.625 0.625
## p29 1.342 0.047 28.420 0.000 0.812 0.812
## p33 1.425 0.049 29.161 0.000 0.862 0.862
## p35 1.118 0.048 23.533 0.000 0.677 0.677
## p48 1.306 0.059 22.326 0.000 0.791 0.791
## p49 1.170 0.045 26.144 0.000 0.708 0.708
## p53 1.208 0.046 26.355 0.000 0.731 0.731
## p54 1.108 0.045 24.896 0.000 0.671 0.671
## p07 1.042 0.050 20.905 0.000 0.631 0.631
## p11 1.206 0.051 23.802 0.000 0.730 0.730
## p13 1.108 0.064 17.239 0.000 0.670 0.670
## p17 1.068 0.049 21.591 0.000 0.647 0.647
## p24 1.234 0.064 19.345 0.000 0.747 0.747
## p26 1.076 0.043 25.098 0.000 0.651 0.651
## p36 1.236 0.049 25.288 0.000 0.748 0.748
## p55 1.122 0.044 25.667 0.000 0.679 0.679
## p56 1.186 0.045 26.163 0.000 0.718 0.718
## p01 1.059 0.054 19.479 0.000 0.641 0.641
## p18 1.022 0.041 24.990 0.000 0.619 0.619
## p19 1.243 0.054 23.209 0.000 0.753 0.753
## p23 0.988 0.044 22.269 0.000 0.598 0.598
## p39 1.052 0.055 19.165 0.000 0.637 0.637
## p43 1.159 0.045 25.616 0.000 0.702 0.702
## p09 1.059 0.059 17.845 0.000 0.641 0.641
## p12 0.959 0.055 17.588 0.000 0.581 0.581
## p16 0.585 0.051 11.429 0.000 0.354 0.354
## p20 1.113 0.070 15.991 0.000 0.674 0.674
## p28 1.169 0.064 18.183 0.000 0.707 0.707
## p47 1.106 0.058 19.033 0.000 0.670 0.670
## p50 1.230 0.065 18.870 0.000 0.745 0.745
## p02 1.351 0.056 24.292 0.000 0.817 0.817
## p03 1.175 0.047 25.096 0.000 0.711 0.711
## p04 1.146 0.048 24.076 0.000 0.694 0.694
## p21 1.246 0.049 25.354 0.000 0.754 0.754
## p22 1.068 0.043 24.814 0.000 0.646 0.646
## p30 1.319 0.049 26.715 0.000 0.798 0.798
## p31 1.449 0.051 28.545 0.000 0.877 0.877
## p37 1.289 0.051 25.413 0.000 0.780 0.780
## p40 1.437 0.052 27.821 0.000 0.870 0.870
## p44 1.384 0.049 28.386 0.000 0.837 0.837
## p45 1.355 0.054 25.302 0.000 0.820 0.820
## p46 1.435 0.052 27.352 0.000 0.868 0.868
## p51 1.221 0.048 25.611 0.000 0.739 0.739
## p52 1.348 0.045 29.932 0.000 0.816 0.816
## p57 1.466 0.051 28.559 0.000 0.887 0.887
##
## Thresholds:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## p06|t1 -0.751 0.038 -19.725 0.000 -0.751 -0.751
## p06|t2 0.154 0.034 4.456 0.000 0.154 0.154
## p06|t3 0.700 0.038 18.635 0.000 0.700 0.700
## p06|t4 1.513 0.053 28.430 0.000 1.513 1.513
## p10|t1 -0.312 0.035 -8.929 0.000 -0.312 -0.312
## p10|t2 0.427 0.035 12.021 0.000 0.427 0.427
## p10|t3 0.869 0.039 22.005 0.000 0.869 0.869
## p10|t4 1.568 0.055 28.478 0.000 1.568 1.568
## p14|t1 0.360 0.035 10.233 0.000 0.360 0.360
## p14|t2 1.047 0.042 24.855 0.000 1.047 1.047
## p14|t3 1.446 0.051 28.266 0.000 1.446 1.446
## p14|t4 2.081 0.081 25.660 0.000 2.081 2.081
## p25|t1 -0.082 0.034 -2.379 0.017 -0.082 -0.082
## p25|t2 0.547 0.036 15.089 0.000 0.547 0.547
## p25|t3 0.916 0.040 22.841 0.000 0.916 0.916
## p25|t4 1.965 0.073 26.747 0.000 1.965 1.965
## p27|t1 -0.394 0.035 -11.155 0.000 -0.394 -0.394
## p27|t2 0.398 0.035 11.264 0.000 0.398 0.398
## p27|t3 0.888 0.040 22.351 0.000 0.888 0.888
## p27|t4 1.712 0.061 28.251 0.000 1.712 1.712
## p29|t1 0.033 0.034 0.957 0.338 0.033 0.033
## p29|t2 0.715 0.038 18.948 0.000 0.715 0.715
## p29|t3 1.090 0.043 25.426 0.000 1.090 1.090
## p29|t4 1.800 0.065 27.879 0.000 1.800 1.800
## p33|t1 0.346 0.035 9.853 0.000 0.346 0.346
## p33|t2 1.063 0.042 25.077 0.000 1.063 1.063
## p33|t3 1.440 0.051 28.249 0.000 1.440 1.440
## p33|t4 2.115 0.084 25.302 0.000 2.115 2.115
## p35|t1 0.022 0.034 0.629 0.529 0.022 0.022
## p35|t2 0.960 0.041 23.561 0.000 0.960 0.960
## p35|t3 1.351 0.049 27.836 0.000 1.351 1.351
## p35|t4 1.915 0.071 27.140 0.000 1.915 1.915
## p48|t1 1.047 0.042 24.855 0.000 1.047 1.047
## p48|t2 1.636 0.058 28.433 0.000 1.636 1.636
## p48|t3 1.881 0.069 27.386 0.000 1.881 1.881
## p48|t4 2.433 0.114 21.310 0.000 2.433 2.433
## p49|t1 0.265 0.035 7.622 0.000 0.265 0.265
## p49|t2 0.975 0.041 23.798 0.000 0.975 0.975
## p49|t3 1.451 0.051 28.284 0.000 1.451 1.451
## p49|t4 2.151 0.086 24.900 0.000 2.151 2.151
## p53|t1 0.003 0.034 0.082 0.935 0.003 0.003
## p53|t2 0.782 0.038 20.341 0.000 0.782 0.782
## p53|t3 1.275 0.047 27.323 0.000 1.275 1.275
## p53|t4 1.927 0.071 27.050 0.000 1.927 1.927
## p54|t1 0.076 0.034 2.215 0.027 0.076 0.076
## p54|t2 0.637 0.037 17.217 0.000 0.637 0.637
## p54|t3 0.999 0.041 24.171 0.000 0.999 0.999
## p54|t4 1.712 0.061 28.251 0.000 1.712 1.712
## p07|t1 0.207 0.035 5.985 0.000 0.207 0.207
## p07|t2 1.128 0.044 25.892 0.000 1.128 1.128
## p07|t3 1.549 0.054 28.470 0.000 1.549 1.549
## p07|t4 2.212 0.091 24.192 0.000 2.212 2.212
## p11|t1 0.382 0.035 10.830 0.000 0.382 0.382
## p11|t2 1.060 0.042 25.033 0.000 1.060 1.060
## p11|t3 1.456 0.051 28.300 0.000 1.456 1.456
## p11|t4 2.115 0.084 25.302 0.000 2.115 2.115
## p13|t1 1.150 0.044 26.137 0.000 1.150 1.150
## p13|t2 1.658 0.058 28.395 0.000 1.658 1.658
## p13|t3 1.881 0.069 27.386 0.000 1.881 1.881
## p13|t4 2.336 0.103 22.620 0.000 2.336 2.336
## p17|t1 0.451 0.036 12.670 0.000 0.451 0.451
## p17|t2 1.275 0.047 27.323 0.000 1.275 1.275
## p17|t3 1.615 0.057 28.459 0.000 1.615 1.615
## p17|t4 2.234 0.093 23.922 0.000 2.234 2.234
## p24|t1 1.009 0.041 24.310 0.000 1.009 1.009
## p24|t2 1.904 0.070 27.226 0.000 1.904 1.904
## p24|t3 2.433 0.114 21.310 0.000 2.433 2.433
## p24|t4 2.748 0.164 16.777 0.000 2.748 2.748
## p26|t1 -0.468 0.036 -13.101 0.000 -0.468 -0.468
## p26|t2 0.813 0.039 20.952 0.000 0.813 0.813
## p26|t3 1.242 0.046 27.049 0.000 1.242 1.242
## p26|t4 1.870 0.068 27.460 0.000 1.870 1.870
## p36|t1 0.587 0.037 16.050 0.000 0.587 0.587
## p36|t2 1.242 0.046 27.049 0.000 1.242 1.242
## p36|t3 1.531 0.054 28.454 0.000 1.531 1.531
## p36|t4 2.308 0.100 22.985 0.000 2.308 2.308
## p55|t1 0.253 0.035 7.295 0.000 0.253 0.253
## p55|t2 0.700 0.038 18.635 0.000 0.700 0.700
## p55|t3 1.002 0.041 24.218 0.000 1.002 1.002
## p55|t4 1.790 0.064 27.927 0.000 1.790 1.790
## p56|t1 0.114 0.034 3.309 0.001 0.114 0.114
## p56|t2 0.651 0.037 17.533 0.000 0.651 0.651
## p56|t3 0.945 0.041 23.323 0.000 0.945 0.945
## p56|t4 1.772 0.063 28.015 0.000 1.772 1.772
## p01|t1 0.836 0.039 21.406 0.000 0.836 0.836
## p01|t2 1.575 0.055 28.478 0.000 1.575 1.575
## p01|t3 1.881 0.069 27.386 0.000 1.881 1.881
## p01|t4 2.191 0.090 24.444 0.000 2.191 2.191
## p18|t1 -0.416 0.035 -11.751 0.000 -0.416 -0.416
## p18|t2 0.294 0.035 8.439 0.000 0.294 0.294
## p18|t3 0.826 0.039 21.205 0.000 0.826 0.826
## p18|t4 1.495 0.053 28.399 0.000 1.495 1.495
## p19|t1 0.899 0.040 22.548 0.000 0.899 0.899
## p19|t2 1.525 0.054 28.447 0.000 1.525 1.525
## p19|t3 1.881 0.069 27.386 0.000 1.881 1.881
## p19|t4 2.336 0.103 22.620 0.000 2.336 2.336
## p23|t1 -0.334 0.035 -9.527 0.000 -0.334 -0.334
## p23|t2 0.616 0.037 16.740 0.000 0.616 0.616
## p23|t3 1.254 0.046 27.154 0.000 1.254 1.254
## p23|t4 2.097 0.082 25.486 0.000 2.097 2.097
## p39|t1 0.717 0.038 19.000 0.000 0.717 0.717
## p39|t2 1.696 0.060 28.301 0.000 1.696 1.696
## p39|t3 2.049 0.079 25.978 0.000 2.049 2.049
## p39|t4 2.366 0.106 22.223 0.000 2.366 2.366
## p43|t1 0.033 0.034 0.957 0.338 0.033 0.033
## p43|t2 1.202 0.045 26.684 0.000 1.202 1.202
## p43|t3 1.673 0.059 28.362 0.000 1.673 1.673
## p43|t4 2.433 0.114 21.310 0.000 2.433 2.433
## p09|t1 0.908 0.040 22.695 0.000 0.908 0.908
## p09|t2 1.688 0.060 28.323 0.000 1.688 1.688
## p09|t3 2.151 0.086 24.900 0.000 2.151 2.151
## p09|t4 2.748 0.164 16.777 0.000 2.748 2.748
## p12|t1 0.589 0.037 16.103 0.000 0.589 0.589
## p12|t2 1.809 0.065 27.829 0.000 1.809 1.809
## p12|t3 2.133 0.085 25.107 0.000 2.133 2.133
## p12|t4 2.398 0.110 21.788 0.000 2.398 2.398
## p16|t1 0.196 0.035 5.658 0.000 0.196 0.196
## p16|t2 0.963 0.041 23.609 0.000 0.963 0.963
## p16|t3 1.360 0.049 27.889 0.000 1.360 1.360
## p16|t4 2.171 0.088 24.679 0.000 2.171 2.171
## p20|t1 1.478 0.052 28.361 0.000 1.478 1.478
## p20|t2 2.034 0.078 26.124 0.000 2.034 2.034
## p20|t3 2.191 0.090 24.444 0.000 2.191 2.191
## p20|t4 2.674 0.150 17.852 0.000 2.674 2.674
## p28|t1 1.210 0.045 26.759 0.000 1.210 1.210
## p28|t2 1.688 0.060 28.323 0.000 1.688 1.688
## p28|t3 1.809 0.065 27.829 0.000 1.809 1.809
## p28|t4 2.282 0.098 23.321 0.000 2.282 2.282
## p47|t1 0.724 0.038 19.156 0.000 0.724 0.724
## p47|t2 1.370 0.049 27.941 0.000 1.370 1.370
## p47|t3 1.754 0.062 28.094 0.000 1.754 1.754
## p47|t4 2.308 0.100 22.985 0.000 2.308 2.308
## p50|t1 1.150 0.044 26.137 0.000 1.150 1.150
## p50|t2 1.849 0.067 27.596 0.000 1.849 1.849
## p50|t3 2.081 0.081 25.660 0.000 2.081 2.081
## p50|t4 2.433 0.114 21.310 0.000 2.433 2.433
## p02|t1 0.789 0.039 20.494 0.000 0.789 0.789
## p02|t2 1.650 0.058 28.409 0.000 1.650 1.650
## p02|t3 1.940 0.072 26.954 0.000 1.940 1.940
## p02|t4 2.433 0.114 21.310 0.000 2.433 2.433
## p03|t1 0.209 0.035 6.040 0.000 0.209 0.209
## p03|t2 1.111 0.043 25.682 0.000 1.111 1.111
## p03|t3 1.594 0.056 28.474 0.000 1.594 1.594
## p03|t4 1.978 0.074 26.636 0.000 1.978 1.978
## p04|t1 0.213 0.035 6.149 0.000 0.213 0.213
## p04|t2 1.168 0.044 26.337 0.000 1.168 1.168
## p04|t3 1.940 0.072 26.954 0.000 1.940 1.940
## p04|t4 2.336 0.103 22.620 0.000 2.336 2.336
## p21|t1 0.251 0.035 7.240 0.000 0.251 0.251
## p21|t2 1.275 0.047 27.323 0.000 1.275 1.275
## p21|t3 1.688 0.060 28.323 0.000 1.688 1.688
## p21|t4 2.191 0.090 24.444 0.000 2.191 2.191
## p22|t1 -0.007 0.034 -0.191 0.848 -0.007 -0.007
## p22|t2 0.756 0.038 19.828 0.000 0.756 0.756
## p22|t3 1.351 0.049 27.836 0.000 1.351 1.351
## p22|t4 2.171 0.088 24.679 0.000 2.171 2.171
## p30|t1 0.046 0.034 1.340 0.180 0.046 0.046
## p30|t2 1.080 0.043 25.296 0.000 1.080 1.080
## p30|t3 1.531 0.054 28.454 0.000 1.531 1.531
## p30|t4 2.019 0.077 26.263 0.000 2.019 2.019
## p31|t1 0.312 0.035 8.929 0.000 0.312 0.312
## p31|t2 1.292 0.047 27.452 0.000 1.292 1.292
## p31|t3 1.915 0.071 27.140 0.000 1.915 1.915
## p31|t4 2.308 0.100 22.985 0.000 2.308 2.308
## p37|t1 0.350 0.035 9.962 0.000 0.350 0.350
## p37|t2 1.414 0.050 28.150 0.000 1.414 1.414
## p37|t3 1.915 0.071 27.140 0.000 1.915 1.915
## p37|t4 2.366 0.106 22.223 0.000 2.366 2.366
## p40|t1 0.466 0.036 13.048 0.000 0.466 0.466
## p40|t2 1.310 0.047 27.577 0.000 1.310 1.310
## p40|t3 1.763 0.063 28.056 0.000 1.763 1.763
## p40|t4 2.258 0.096 23.633 0.000 2.258 2.258
## p44|t1 0.095 0.034 2.762 0.006 0.095 0.095
## p44|t2 1.305 0.047 27.546 0.000 1.305 1.305
## p44|t3 1.870 0.068 27.460 0.000 1.870 1.870
## p44|t4 2.308 0.100 22.985 0.000 2.308 2.308
## p45|t1 0.807 0.039 20.851 0.000 0.807 0.807
## p45|t2 1.615 0.057 28.459 0.000 1.615 1.615
## p45|t3 2.171 0.088 24.679 0.000 2.171 2.171
## p45|t4 2.612 0.139 18.749 0.000 2.612 2.612
## p46|t1 0.356 0.035 10.124 0.000 0.356 0.356
## p46|t2 1.319 0.048 27.637 0.000 1.319 1.319
## p46|t3 1.809 0.065 27.829 0.000 1.809 1.809
## p46|t4 2.258 0.096 23.633 0.000 2.258 2.258
## p51|t1 0.330 0.035 9.418 0.000 0.330 0.330
## p51|t2 1.009 0.041 24.310 0.000 1.009 1.009
## p51|t3 1.467 0.052 28.332 0.000 1.467 1.467
## p51|t4 2.049 0.079 25.978 0.000 2.049 2.049
## p52|t1 0.016 0.034 0.465 0.642 0.016 0.016
## p52|t2 0.852 0.039 21.707 0.000 0.852 0.852
## p52|t3 1.323 0.048 27.667 0.000 1.323 1.323
## p52|t4 2.034 0.078 26.124 0.000 2.034 2.034
## p57|t1 0.600 0.037 16.369 0.000 0.600 0.600
## p57|t2 1.467 0.052 28.332 0.000 1.467 1.467
## p57|t3 2.019 0.077 26.263 0.000 2.019 2.019
## p57|t4 2.282 0.098 23.321 0.000 2.282 2.282
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .p06 0.634 0.634 0.634
## .p10 0.767 0.767 0.767
## .p14 0.371 0.371 0.371
## .p25 0.690 0.690 0.690
## .p27 0.609 0.609 0.609
## .p29 0.340 0.340 0.340
## .p33 0.256 0.256 0.256
## .p35 0.542 0.542 0.542
## .p48 0.375 0.375 0.375
## .p49 0.498 0.498 0.498
## .p53 0.465 0.465 0.465
## .p54 0.550 0.550 0.550
## .p07 0.602 0.602 0.602
## .p11 0.468 0.468 0.468
## .p13 0.551 0.551 0.551
## .p17 0.582 0.582 0.582
## .p24 0.442 0.442 0.442
## .p26 0.576 0.576 0.576
## .p36 0.440 0.440 0.440
## .p55 0.539 0.539 0.539
## .p56 0.484 0.484 0.484
## .p01 0.589 0.589 0.589
## .p18 0.617 0.617 0.617
## .p19 0.434 0.434 0.434
## .p23 0.642 0.642 0.642
## .p39 0.595 0.595 0.595
## .p43 0.508 0.508 0.508
## .p09 0.589 0.589 0.589
## .p12 0.663 0.663 0.663
## .p16 0.875 0.875 0.875
## .p20 0.546 0.546 0.546
## .p28 0.500 0.500 0.500
## .p47 0.552 0.552 0.552
## .p50 0.445 0.445 0.445
## .p02 0.332 0.332 0.332
## .p03 0.494 0.494 0.494
## .p04 0.519 0.519 0.519
## .p21 0.431 0.431 0.431
## .p22 0.582 0.582 0.582
## .p30 0.363 0.363 0.363
## .p31 0.231 0.231 0.231
## .p37 0.391 0.391 0.391
## .p40 0.243 0.243 0.243
## .p44 0.299 0.299 0.299
## .p45 0.327 0.327 0.327
## .p46 0.246 0.246 0.246
## .p51 0.454 0.454 0.454
## .p52 0.334 0.334 0.334
## .p57 0.213 0.213 0.213
## abuse 0.366 0.025 14.584 0.000 1.000 1.000
##
## R-Square:
## Estimate
## p06 0.366
## p10 0.233
## p14 0.629
## p25 0.310
## p27 0.391
## p29 0.660
## p33 0.744
## p35 0.458
## p48 0.625
## p49 0.502
## p53 0.535
## p54 0.450
## p07 0.398
## p11 0.532
## p13 0.449
## p17 0.418
## p24 0.558
## p26 0.424
## p36 0.560
## p55 0.461
## p56 0.516
## p01 0.411
## p18 0.383
## p19 0.566
## p23 0.358
## p39 0.405
## p43 0.492
## p09 0.411
## p12 0.337
## p16 0.125
## p20 0.454
## p28 0.500
## p47 0.448
## p50 0.555
## p02 0.668
## p03 0.506
## p04 0.481
## p21 0.569
## p22 0.418
## p30 0.637
## p31 0.769
## p37 0.609
## p40 0.757
## p44 0.701
## p45 0.673
## p46 0.754
## p51 0.546
## p52 0.666
## p57 0.787
NOTE: With respect to fit of the structural model, we are now fitting a single factor INSTEAD OF 5 factors and a higher-order factor. This will tell us the extent to which a single score is appropriate.
To test its fit against the model with five correlated factors (from the previous section), we use a scaled chi-square difference test via anova(); this is the lavaan analogue of directing DIFFTEST on the Mplus ANALYSIS command to use the results from the previous model.
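The comparison model, ifaNoHighEstimates, is the five-correlated-factors model estimated with WLSMV in the previous section; a minimal sketch of that call, mirroring the single-factor WLSMV call above and reusing the five-factor syntax sketched earlier:
orderedItems = c("p06", "p10", "p14", "p25", "p27", "p29", "p33", "p35", "p48", "p49", "p53", "p54",
                 "p07", "p11", "p13", "p17", "p24", "p26", "p36", "p55", "p56", "p01", "p18", "p19",
                 "p23", "p39", "p43", "p09", "p12", "p16", "p20", "p28", "p47", "p50", "p02", "p03",
                 "p04", "p21", "p22", "p30", "p31", "p37", "p40", "p44", "p45", "p46", "p51", "p52", "p57")
ifaNoHighEstimates = cfa(model = cfaNoHighSyntax, data = abuseData, std.lv = FALSE, mimic = "mplus",
                         estimator = "WLSMV", ordered = orderedItems)  # parameterization left at its default, as in the call above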
anova(ifaSingleEstimates, ifaNoHighEstimates)
##
## Scaled Chi-Squared Difference Test (method = "satorra.2000")
##
## lavaan->lavTestLRT():
## lavaan NOTE: The "Chisq" column contains standard test statistics, not the
## robust test that should be reported per model. A robust difference test is
## a function of two standard (not robust) statistics.
## Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
## ifaNoHighEstimates 1117 5673.9
## ifaSingleEstimates 1127 8446.7 769.18 10 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Again, lavaan complains about this comparison (see the note above), so we’ll use the Mplus DIFFTEST result in our write-up below.
After examining the fit of each of the five factors individually, as
described previously, a combined model was estimated in which all five
factors were fit simultaneously with covariances estimated freely among
them. A total of 49 items were thus included. Each factor was identified
by fixing the first item loading on each factor to 1, estimating the
factor variance, and then fixing the factor mean to 0, while estimating
all possible item intercepts, item residual variances, and remaining
item loadings. Robust maximum likelihood (MLR) estimation was used to
estimate all higher-order models using the lavaan package
(Rosseel, 2012) in R (R Core Team, 2017), and differences in fit between
nested models were evaluated using the rescaled difference in −2 log-likelihood
(−2ΔLL) values.
As shown in Table 1, the fit of the model with five correlated factors was acceptable by the RMSEA (.047), but not by the CFI (.844). Standardized model parameters (loadings, intercepts, and residual variances) are shown in Table 2. Correlations of .6 or higher were found among the five factors, suggesting that the five factors may indicate a single higher-order factor. This idea was tested by eliminating the covariances among the factors and instead estimating loadings for the five factors on a single higher-order factor (identified by fixing its loading for the spurning factor to 1 and estimating its variance). Although the fit of the higher-order factor model remained marginal (see Table 1), a nested model comparison revealed a significant decrease in fit, −2ΔLL(5) = 47.083, p < .0001, indicating that a single higher-order factor did not appear adequate to describe the pattern of correlation among the five factors. A further nested model comparison was conducted to examine the extent to which a single factor could describe the covariances among the items rather than five lower-order factors and a single higher-order factor. Fit of the single-factor-only model was poor, as shown in Table 1, and was significantly worse than the higher-order factor model, −2ΔLL(5) = 448.91, p < .0001, indicating that a single “total score” would not be recommended.
After examining the fit of each of the five factors individually, as
described previously, a combined model was estimated in which all five
factors were fit simultaneously with covariances estimated freely among
them. A total of 49 items were thus included. Each factor was identified
by fixing the first item loading on each factor to 1, estimating the
factor variance, and then fixing the factor mean to 0, while estimating
all possible item thresholds (four for each item given five response
options) and remaining item loadings. WLSMV estimation in the
lavaan package (Rosseel, 2012) in R (R Core Team, 2017)
including a probit link and the THETA parameterization (such that all
item residual variances were constrained to 1) was used to estimate all
higher-order models. Thus, model fit statistics describe the fit of the
item factor model to the polychoric correlation matrix among the items.
Nested model comparisons were conducted using the Mplus DIFFTEST
procedure.
As shown in Table 1, the fit of the model with five correlated factors was acceptable. Item factor analysis parameters (loadings and thresholds) and their corresponding item response model parameters (discriminations and difficulties) are shown in Table 2. Correlations of .7 or higher were found among the five factors, suggesting that the five factors may indicate a single higher-order factor. This idea was tested by eliminating the covariances among the factors and instead estimating loadings for the five factors on a single higher-order factor (whose variance was fixed to 1). Although the fit of the higher-order factor model remained acceptable (see Table 1), a nested model comparison via the DIFFTEST procedure revealed a significant decrease in fit, DIFFTEST(5) = 92.05, p < .0001, indicating that a single higher-order factor did not appear adequate to describe the pattern of correlation among the five factors. A further nested model comparison was conducted to examine the extent to which a single factor could describe the polychoric correlations among the items rather than five lower-order factors and a single higher-order factor. Fit of the single-factor-only model was poor, as shown in Table 1, and was significantly worse than the higher-order factor model, DIFFTEST(5) = 611.95, p < .0001, indicating that a single score would not be recommended.
Table 1 would contain the fit information for each model. Table 2 would contain the actual model parameters (unstandardized and standardized estimates and their SEs, so 4 columns).
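A sketch of how Table 1 could be assembled from the fitted lavaan objects (the correlated-factors model name is assumed from the previous section):
mlrModels = list(FiveFactors = cfaNoHighEstimates, HigherOrder = cfaHigherEstimates,
                 SingleFactor = cfaSingleEstimates)
table1 = t(sapply(mlrModels, fitMeasures,
                  fit.measures = c("chisq.scaled", "df.scaled", "cfi.scaled",
                                   "tli.scaled", "rmsea.scaled", "srmr")))
round(table1, 3)
# Table 2 (model parameters) could start from parameterEstimates() or standardizedSolution()
# for whichever model is retained.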