Does anyone want to work with me through this book? We can compare our exercise solutions.
Download:
http://gen.lib.rus.ec/book/index.php?md5=8499a4cf4ebc5252ba6ff28561fde3e6
About:
https://blogs.baylor.edu/rlatentvariable/
https://www.goodreads.com/book/show/20609783-latent-variable-modeling-using-r
http://www.amazon.com/Latent-Variable-Modeling-Using-Step/dp/1848726996
Chapter 2
[spoiler]
#Part 1
library(lavaan) #needed for sem(), cfa(), and the matrix helpers used below
MathHmwk.data = read.csv("MathHmwk.txt", sep="", header=TRUE) #load the whitespace-separated file
head(MathHmwk.data) #print its head
colnames(MathHmwk.data)
lm(MathAchievement ~ MathHomework, data=MathHmwk.data) #regress MathAch
cor(MathHmwk.data) #correlations = standardized cov
MathHmwk.model = '
MathAchievement ~ MathHomework
'
MathHmwk.fit = sem(MathHmwk.model, data=MathHmwk.data) #run sem to fit
summary(MathHmwk.fit) #print
MathHmwk.fit = sem(model = MathHmwk.model, data=MathHmwk.data, std.ov = TRUE) #again, with std.ov = TRUE to standardize the observed variables; the output did not look right to me, so this option may be bugged
summary(MathHmwk.fit) #print
#alternative method, standardized data first manually
Z.MathHmwk.data = scale(MathHmwk.data) #scale data = z-transform
MathHmwk.fit = sem(model = MathHmwk.model, data=Z.MathHmwk.data) #run sem to fit
summary(MathHmwk.fit) #print
#Part 2
#manually type in Table 2.5 (lower2full comes from lavaan; newer versions call it lav_matrix_lower2full)
PK.cor = lower2full(c(1,.178,1,.23,.327,1,.106,.245,.183,1,.195,.356,.721,.178,1)) #build the full symmetric matrix from the lower triangle, entered row-wise
colnames(PK.cor) = rownames(PK.cor) = c("Race","SES","CogAbil","SchTyp","AcadAch") #give names with double assign
PK.cor #print and see
#each path gets its own label; reusing a label would force two paths to be equal in lavaan
PK.model = '
SES ~ a*Race
SchTyp ~ b*Race + d*CogAbil + f*SES
CogAbil ~ c*Race + e*SES
AcadAch ~ h*Race + g*SES + i*CogAbil + j*SchTyp
'
#the exercise provides only a correlation matrix, so I pass it as sample.cov (the solution is therefore standardized)
PK.fit = sem(model=PK.model, sample.cov=PK.cor, sample.nobs = 18058) #input data
summary(PK.fit) #print stuff
#answer is j = .021
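#Extra (my addition, hypothetical): had the table also reported standard deviations,
#the correlations could be rescaled to covariances first with lavaan's cor2cov();
#the sds vector here is made up purely for illustration, so the lines stay commented out
#PK.sds = c(0.5, 1.1, 15.0, 0.5, 9.0) #hypothetical SDs, one per variable
#PK.fit2 = sem(model=PK.model, sample.cov=cor2cov(PK.cor, sds=PK.sds), sample.nobs=18058)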
#Part 3
MK.cov = lower2full(c(84.85,71.28,140.34,18.83,-6.25,72.92,60.05,84.54,37.18,139.48)) #input data manually
colnames(MK.cov) = rownames(MK.cov) = c("TeacherExp","SocialCli","MatCov","StudAch") #name stuff
MK.cov #print to check
MK.model = '
SocialCli ~ a1*TeacherExp
MatCov ~ a2*TeacherExp
StudAch ~ b1*SocialCli + b2*MatCov
c1 := a1*b1
c2 := a2*b2
'
#fit it
MK.fit = sem(model=MK.model, sample.cov=MK.cov, sample.nobs = 40) #input data
summary(MK.fit) #print stuff
#results: c1 = .527, c2 = .125
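#Extra (my addition): the := lines are defined parameters, so lavaan reports the
#indirect effects with delta-method standard errors; to pull out just those rows:
MK.pe = parameterEstimates(MK.fit)
MK.pe[MK.pe$op == ":=", ]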
[/spoiler]
Chapter 3
[spoiler]
#3.1
#enter the covariance matrix
um.cov = lower2full(c(.77,.38,.65,.39,.39,.62,-.25,-.32,-.27,6.09))
colnames(um.cov) = rownames(um.cov) = c("D1","D2","D3","SA")
#marker-variable method (lavaan's default: first loading fixed to 1)
um.model1 = '
LV =~ a*D1 + b*D2 + c*D3 + d*SA
'
#fit and print
um.model1.fit = cfa(um.model1, sample.cov=um.cov, sample.nobs=6053)
summary(um.model1.fit, standardized=T)
#standardized latent variable method
um.model2 = '
LV =~ NA*D1 + a*D1 + b*D2 + c*D3 + d*SA
#constrain the LV variance to 1
LV~~1*LV
'
#fit
um.model2.fit = cfa(um.model2, sample.cov=um.cov, sample.nobs=6053)
#equivalent shortcut: std.lv=TRUE frees the first loading and standardizes the LV
um.model2.fit = cfa(um.model1, sample.cov=um.cov, sample.nobs=6053, std.lv=TRUE)
#print
summary(um.model2.fit, standardized=T)
#effects coding method
um.model3 = '
LV =~ NA*D1 + a*D1 + b*D2 + c*D3 + d*SA
#constrain
a+b+c+d == 4
'
#fit and print
um.model3.fit = cfa(um.model3, sample.cov=um.cov, sample.nobs=6053)
summary(um.model3.fit, standardized=T)
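#Extra (my addition, not in the book): all three scaling methods are just different
#identification constraints, so they should produce identical model fit
sapply(list(marker=um.model1.fit, std=um.model2.fit, effects=um.model3.fit),
       fitMeasures, fit.measures="chisq")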
#3.2
#enter the covariance matrix
um2.cov = lower2full(c(.77,
.38,.65,
.39,.39,.62,
-.25,-.32,-.27,6.09,
.31,.29,.26,-.36,7.67,
.24,.25,.19,-.18,.51,1.69,
-3.16,-3.56,-2.63,6.09,-3.12,-4.58,204.79,
-.92,-.88,-.72,.88,-1.49,-1.41,16.53,7.24))
colnames(um2.cov) = rownames(um2.cov) = c("D1","D2","D3","SA","F","CC","PA","PM")
um2.model = '
PSH =~ a*D1+b*D2+c*D3+d*SA+h*PM
PHH =~ e*CC+f*PA+g*F+i*PM
'
#fit and print
um2.model.fit = cfa(um2.model, sample.cov=um2.cov, sample.nobs=6053)
summary(um2.model.fit,standardized=T)
#3.3
#enter the covariance matrix
G.cov = lower2full(c(59.66,
11.18,22.30,
2.63,.41,1))
colnames(G.cov) = rownames(G.cov) = c("DV1","DV2","C") #set names
G.model = '
#structural part: F regressed on the two predictors
F ~ a*DV1 + b*DV2
#measurement part: F measured by the single indicator C, with E as a residual factor
F =~ c*C
E =~ C
E ~~ d*E
'
#fit
G.model.fit = cfa(G.model, sample.cov=G.cov, sample.nobs=288)
#print
summary(G.model.fit,standardized=T)
parameterEstimates(G.model.fit,standardized=TRUE,ci=FALSE)
#I'm unsure about the last part of 3.3.c: the standardized loading for F =~ C is .51, so R^2 = .51^2 = 0.2601 and 1 - 0.2601 = 0.7399. Is that right?
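#Extra (my addition): lavaan can report R^2 for the indicators directly, which
#gives a quick check on the hand calculation above
inspect(G.model.fit, "rsquare")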
[/spoiler]
Chapter 4
[spoiler]
#4.1
wisc.cov = lower2full(c(1,
.31,1,
.36,.40,1,
.51,.46,.45,1,
.29,.40,.33,.43,1,
.39,.29,.27,.36,.33,1,
.32,.27,.29,.33,.24,.28,1,
.22,.32,.15,.22,.27,.12,.26,1))
#the book does not show how to use upper2full, but the upper triangle is likewise entered row-wise
wisc2.cov = upper2full(c(1,.43,.45,.61,.34,.33,.43,.18,
1,.43,.50,.47,.25,.42,.22,
1,.59,.34,.36,.50,.18,
1,.35,.37,.43,.27,
1,.18,.32,.18,
1,.42,.13,
1,.26,
1))
wisc3.cov = lower2full(c(1,
.33,1,
.46,.46,1,
.61,.41,.64,1,
.22,.39,.36,.30,1,
.23,.30,.33,.31,.25,1,
.22,.36,.37,.35,.30,.47,1,
.13,.22,.18,.19,.09,.24,.20,1))
#this time I copied the values from the book and added commas, which was much faster
wisc4.cov = upper2full(c(1.00, 0.46, 0.58, 0.63, 0.27, 0.45, 0.33, 0.15,
1.00, 0.55, 0.43, 0.51, 0.38, 0.52, 0.27,
1.00, 0.73, 0.37, 0.37, 0.49, 0.16,
1.00, 0.33, 0.43, 0.41, 0.09,
1.00, 0.13, 0.29, 0.12,
1.00, 0.43, 0.25,
1.00, 0.23,
1.00))
# variable names
wisc.names <- c("Info", "Sim", "Vocab","Comp", "PicComp", "PicArr", "BlkDsgn", "ObjAsmb")
colnames(wisc.cov) <- rownames(wisc.cov) <- wisc.names #set names for covtrix 1-4
colnames(wisc2.cov) <- rownames(wisc2.cov) <- wisc.names
colnames(wisc3.cov) <- rownames(wisc3.cov) <- wisc.names
colnames(wisc4.cov) <- rownames(wisc4.cov) <- wisc.names
#means
wisc.mean = c(7.83, 5.50, 5.67, 21.50, 7.67, 8.00, 6.50, 34.83)
wisc2.mean = c(11.00, 7.83, 8.33, 19.67, 8.67, 13.67, 12.00, 39.00)
wisc3.mean = c(3.52, 2.19, 3.19, 4.76, 2.53, 3.85, 10.25, 10.51)
wisc4.mean = c(15.17, 15.00, 11.83, 21.67, 12.17, 17.83, 18.67, 45.83)
#sd's
wisc.sd = c(2.69, 1.50, 2.36, 6.06, 1.85, 2.18, 5.97, 9.94)
wisc2.sd = c(3.63, 1.77, 3.26, 5.07, 2.86, 4.25, 8.82, 8.05)
wisc3.sd = c(3.52, 2.19, 3.19, 4.76, 2.53, 3.85, 10.25, 10.51)
wisc4.sd = c(4.93, 4.10, 5.20, 6.54, 2.72, 5.35, 9.36, 10.44)
#n's
wisc.n = wisc2.n = wisc3.n = wisc4.n = 200
#make lists
covs.wisc = list(wisc=wisc.cov, wisc2=wisc2.cov, wisc3=wisc3.cov, wisc4=wisc4.cov)
means.wisc = list(wisc=wisc.mean, wisc2=wisc2.mean, wisc3=wisc3.mean, wisc4=wisc4.mean)
sds.wisc = list(wisc=wisc.sd, wisc2=wisc2.sd, wisc3=wisc3.sd, wisc4=wisc4.sd) #collected for completeness, but not used below (the correlation matrices are analyzed as given)
n.wisc = list(wisc=wisc.n, wisc2=wisc2.n, wisc3=wisc3.n, wisc4=wisc4.n)
#the exercise does not specify which model to use, so I reused the two-factor model from earlier in the chapter
wisc.model <-'
VC =~ Info + Sim + Vocab + Comp
VS =~ PicComp + PicArr + BlkDsgn + ObjAsmb
VC ~~ VS
'
# specify fit indices of interest
fit.indices <- c("chisq", "df", "cfi", "rmsea", "srmr", "mfi")
#fit each model by itself
#1
wisc.fit <- cfa(wisc.model, sample.cov=wisc.cov, sample.nobs=wisc.n, sample.mean=wisc.mean, meanstructure=TRUE)
fitMeasures(wisc.fit, fit.indices)
#2
wisc2.fit <- cfa(wisc.model, sample.cov=wisc2.cov, sample.nobs=wisc2.n, sample.mean=wisc2.mean, meanstructure=TRUE)
fitMeasures(wisc2.fit, fit.indices)
#3
wisc3.fit <- cfa(wisc.model, sample.cov=wisc3.cov, sample.nobs=wisc3.n, sample.mean=wisc3.mean, meanstructure=TRUE)
fitMeasures(wisc3.fit, fit.indices)
#4
wisc4.fit <- cfa(wisc.model, sample.cov=wisc4.cov, sample.nobs=wisc4.n, sample.mean=wisc4.mean, meanstructure=TRUE)
fitMeasures(wisc4.fit, fit.indices)
#pretty bad fit for wisc4
#try each step of invariance
configural.wisc.fit <- cfa(wisc.model, sample.cov=covs.wisc, sample.nobs=n.wisc, sample.mean=means.wisc)
fitMeasures(configural.wisc.fit, fit.indices)
#probably not acceptable
# weak invariance
weak.wisc.fit <- cfa(wisc.model, sample.cov=covs.wisc, sample.nobs=n.wisc, sample.mean=means.wisc, group.equal=c("loadings"))
fitMeasures(weak.wisc.fit, fit.indices)
#about the same as before
# strong invariance
strong.wisc.fit <- cfa(wisc.model, sample.cov=covs.wisc, sample.nobs=n.wisc, sample.mean=means.wisc, group.equal=c("loadings", "intercepts"))
fitMeasures(strong.wisc.fit, fit.indices)
#several indices look bad, so the data are definitely not strongly invariant
#there seems little point in going on to strict invariance
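#Extra (my addition): chi-square difference tests between the nested invariance
#models make the same point more formally
anova(configural.wisc.fit, weak.wisc.fit, strong.wisc.fit)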
#4.2
#bmi.cov, bmi.n, and bmi.ade.fit must already exist from the chapter's worked twin example (not repeated here)
#fit statistics for the ADE model
fitMeasures(bmi.ade.fit, fit.indices)
# specify ACE model - the only change from ADE is that the C factors correlate 1.0 in both twin groups, instead of c(1,.25) for D
bmi.ace.model<-'
# build the factor model with group constraints
A1 =~ NA*P1 + c(a,a)*P1
A2 =~ NA*P2 + c(a,a)*P2
C1 =~ NA*P1 + c(c,c)*P1
C2 =~ NA*P2 + c(c,c)*P2
# constrain the factor variances
A1 ~~ 1*A1
A2 ~~ 1*A2
C1 ~~ 1*C1
C2 ~~ 1*C2
P1~~c(e2,e2)*P1
P2~~c(e2,e2)*P2
# constrain the factor covariances
A1 ~~ c(1,.5)*A2
A1 ~~ 0*C1 + 0*C2
A2 ~~ 0*C1 + 0*C2
C1 ~~ c(1,1)*C2
'
# fit model
bmi.ace.fit <- cfa(bmi.ace.model, sample.cov=bmi.cov, sample.nobs=bmi.n)
summary(bmi.ace.fit, standardized=TRUE)
fitMeasures(bmi.ace.fit, fit.indices) #fits worse than ADE model
# specify CE model - just remove any mention of A from ACE model
bmi.ce.model<-'
# build the factor model with group constraints
C1 =~ NA*P1 + c(c,c)*P1
C2 =~ NA*P2 + c(c,c)*P2
# constrain the factor variances
C1 ~~ 1*C1
C2 ~~ 1*C2
P1~~c(e2,e2)*P1
P2~~c(e2,e2)*P2
# constrain the factor covariances
C1 ~~ c(1,1)*C2
'
# fit model
bmi.ce.fit <- cfa(bmi.ce.model, sample.cov=bmi.cov, sample.nobs=bmi.n)
summary(bmi.ce.fit, standardized=TRUE)
fitMeasures(bmi.ce.fit, fit.indices) #fits much worse than both ACE and ADE
# specify AE model - the only change from ADE/ACE is that we remove any mention of C/D
bmi.ae.model<-'
# build the factor model with group constraints
A1 =~ NA*P1 + c(a,a)*P1
A2 =~ NA*P2 + c(a,a)*P2
# constrain the factor variances
A1 ~~ 1*A1
A2 ~~ 1*A2
P1~~c(e2,e2)*P1
P2~~c(e2,e2)*P2
# constrain the factor covariances
A1 ~~ c(1,.5)*A2
'
# fit model
bmi.ae.fit <- cfa(bmi.ae.model, sample.cov=bmi.cov, sample.nobs=bmi.n)
summary(bmi.ae.fit, standardized=TRUE)
fitMeasures(bmi.ae.fit, fit.indices) #fits marginally better than ACE, but worse than ADE
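#Extra (my addition): information criteria give a compact ranking of the four
#biometric models (lavaan fits support the standard AIC() generic)
sapply(list(ADE=bmi.ade.fit, ACE=bmi.ace.fit, CE=bmi.ce.fit, AE=bmi.ae.fit), AIC)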
#4.3
#input data
LSC.1.cov = lower2full(c(1,
0.759, 1.000,
0.762, 0.787, 1.000,
0.028, 0.010, -0.058, 1.000,
-0.061, -0.061, -0.141, 0.785, 1.000,
-0.022, -0.052, -0.102, 0.816, 0.816, 1.000))
LSC.2.cov = upper2full(c(1.000, 0.813, 0.850, -0.188, -0.289, -0.293,
1.000, 0.835, -0.155, -0.250, -0.210,
1.000, -0.215, -0.338, -0.306,
1.000, 0.784, 0.800,
1.000, 0.832,
1.000))
#names
rownames(LSC.1.cov) = colnames(LSC.1.cov) = c("I1","I2","I3","I4","I5","I6")
rownames(LSC.2.cov) = colnames(LSC.2.cov) = c("I1","I2","I3","I4","I5","I6")
#means
LSC.1.means = c(3.135, 2.991, 3.069, 1.701, 1.527, 1.545)
LSC.2.means = c(3.073, 2.847, 2.979, 1.717, 1.580, 1.550)
#sd's
LSC.1.sds = c(0.668, 0.685, 0.707, 0.714, 0.663, 0.653)
LSC.2.sds = c(0.703, 0.718, 0.762, 0.650, 0.602, 0.614)
#n's
LSC.1.n = 380
LSC.2.n = 379
#lists
LSC.cov = list(LSC1=LSC.1.cov, LSC2=LSC.2.cov)
LSC.mean = list(LSC1=LSC.1.means, LSC2=LSC.2.means)
LSC.sd = list(LSC1=LSC.1.sds, LSC2=LSC.2.sds)
LSC.n = list(LSC1=LSC.1.n, LSC2=LSC.2.n)
#model
LSC.model = '
F1 =~ a*I1 + b*I2 + c*I3
F2 =~ d*I4 + e*I5 + f*I6
F1 ~~ F2
'
#fit by itself
LSC.1.fit = cfa(LSC.model, sample.cov=LSC.1.cov, sample.nobs=LSC.1.n, sample.mean=LSC.1.means, meanstructure=TRUE)
fitMeasures(LSC.1.fit, fit.indices) #decent
LSC.2.fit = cfa(LSC.model, sample.cov=LSC.2.cov, sample.nobs=LSC.2.n, sample.mean=LSC.2.means, meanstructure=TRUE)
fitMeasures(LSC.2.fit, fit.indices) #a little worse
#try each step of invariance
#configural
LSC.configural <- cfa(LSC.model, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean)
fitMeasures(LSC.configural, fit.indices)
#still decent
# weak invariance
LSC.weak <- cfa(LSC.model, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, group.equal=c("loadings"))
fitMeasures(LSC.weak, fit.indices)
#slightly better
# strong invariance
LSC.strong <- cfa(LSC.model, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, group.equal=c("loadings", "intercepts"))
fitMeasures(LSC.strong, fit.indices)
#slightly better again
#same sequence with std.lv=TRUE (standardized latent variables) instead of the default marker scaling
#configural
LSC.configural.std <- cfa(LSC.model, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, std.lv=TRUE)
fitMeasures(LSC.configural.std, fit.indices)
# weak invariance
LSC.weak.std <- cfa(LSC.model, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, group.equal=c("loadings"), std.lv=TRUE)
fitMeasures(LSC.weak.std, fit.indices)
# strong invariance
LSC.strong.std <- cfa(LSC.model, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, group.equal=c("loadings", "intercepts"), std.lv=TRUE)
fitMeasures(LSC.strong.std, fit.indices)
#the fit statistics should match the default runs, since scaling is just an identification choice
#with effects scaling
#model
LSC.model.effects = '
F1 =~ NA*I1 + a*I1 + b*I2 + c*I3
F2 =~ NA*I4 + d*I4 + e*I5 + f*I6
#effects coding: the loadings average to 1 within each factor
a+b+c == 3
d+e+f == 3
F1 ~~ F2
'
#configural
LSC.configural.effects <- cfa(LSC.model.effects, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean)
fitMeasures(LSC.configural.effects, fit.indices)
# weak invariance
LSC.weak.effects <- cfa(LSC.model.effects, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, group.equal=c("loadings"))
fitMeasures(LSC.weak.effects, fit.indices)
# strong invariance
LSC.strong.effects <- cfa(LSC.model.effects, sample.cov=LSC.cov, sample.nobs=LSC.n, sample.mean=LSC.mean, group.equal=c("loadings", "intercepts"))
fitMeasures(LSC.strong.effects, fit.indices)
#again the fit should match: effects coding is just another identification choice
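#Extra (my addition): the nested invariance models can also be compared with
#chi-square difference tests
anova(LSC.configural, LSC.weak, LSC.strong)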
[/spoiler]