I am trying to reproduce several interaction tests from a repeated-measures (2x2x2) design with both lm and lmer. The reason I want to compare the two approaches is that SPSS's GLM for repeated measures produces results that are exactly identical to the lm approach presented here, so in the end I want to compare SPSS against R's lmer. So far I have only managed to (closely) reproduce some of these interactions.
You will find a script below that illustrates my point:
library(data.table)
library(tidyr)
library(lmerTest)
library(MASS)
set.seed(1)
N <- 100 # number of subjects
sigma <- 1 # population sd
rho <- .6 # correlation between variables
# X1: a a a a b b b b
# X2: a a b b a a b b
# X3: a b a b a b a b
mu <- c(5, 3, 3, 5, 3, 5, 5, 3) # means
# Simulate the data
sigma.mat <- rep(sigma, length(mu))
S <- matrix(sigma.mat, ncol = length(sigma.mat), nrow = length(sigma.mat))
Sigma <- t(S) * S * rho
diag(Sigma) <- sigma**2
X <- data.table( mvrnorm(N, mu, Sigma) )
setnames(X, names(X), c("aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"))
X[, id := 1:.N]
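Just as a sanity check on the simulation (this snippet is only illustrative and not part of the comparison), the implied correlation structure is compound symmetric with off-diagonal rho, and the simulated columns should roughly reproduce both the correlations and the cell means:
# Sanity check: compound-symmetric correlation structure and cell means
round(cov2cor(Sigma), 2)
round(cor(X[, .(aaa, aab, aba, abb, baa, bab, bba, bbb)]), 2)
round(colMeans(X[, .(aaa, aab, aba, abb, baa, bab, bba, bbb)]) - mu, 2)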
# Long format
XL <- data.table( gather(X, key, Y, aaa:bbb) )
XL[, X1 := substr(key, 1, 1)]
XL[, X2 := substr(key, 2, 2)]
XL[, X3 := substr(key, 3, 3)]
# Recode long format (a = +1; b = -1)
XL[, X1c := ifelse(X1 == "a", +1, -1)]
XL[, X2c := ifelse(X2 == "a", +1, -1)]
XL[, X3c := ifelse(X3 == "a", +1, -1)]
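A quick check (again only illustrative) that the reshape and the +1/-1 recoding line up as intended:
# Each subject should contribute one row per cell, and the numeric codes
# should map one-to-one onto the letter codes
stopifnot(XL[, .N, by = id][, all(N == 8L)])
XL[, table(X1, X1c)]
XL[, table(X2, X2c)]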
### Composite scores to be used with lm
# X2:X3 2-way interaction (for half the data; i.e. when X1 == "a")
X[, X1a_X2.X3 := (aaa - aab) - (aba - abb)]
# X2:X3 2-way interaction (for all the data)
X[, aa := (aaa + baa) / 2]
X[, ab := (aab + bab) / 2]
X[, ba := (aba + bba) / 2]
X[, bb := (abb + bbb) / 2]
X[, X2.X3 := (aa - ab) - (ba - bb)]
# X1:X2:X3 3-way interaction (for all the data)
X[, X1.X2.X3 := ( (aaa - aab) - (aba - abb) ) - ( (baa - bab) - (bba - bbb) )]
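Just to spell out the logic behind these composites (as I understand it): each one is a per-subject contrast score, so the intercept-only lm used below is equivalent to a one-sample t-test on that contrast, which is exactly what the univariate repeated-measures approach tests for a 1-df within-subject effect:
# The intercept-only lm on a composite equals a one-sample t-test on that
# per-subject contrast (same t and df)
t.test(X$X2.X3)      # same t as summary( lm(X2.X3 ~ 1, X) )
t.test(X$X1.X2.X3)   # same t as summary( lm(X1.X2.X3 ~ 1, X) )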
### Fit models
# X2:X3 2-way interaction (for half the data; i.e. when X1 == "a")
summary( lm(X1a_X2.X3 ~ 1, X) ) # t = 34.13303
summary( lmer(Y ~ X2c*X3c + (X2c+X3c|id), XL[X1 == "a"]) ) # t = 34.132846 close match
summary( lmer(Y ~ X2c*X3c + (X2c+X3c||id), XL[X1 == "a"]) ) # t = 34.134624 close match
# X2:X3 2-way interaction (for all the data)
summary( lm(X2.X3 ~ 1, X) ) # t = 0.3075025
summary( lmer(Y ~ X2c*X3c + (X2c+X3c|id), XL) ) # t = 0.1641932
summary( lmer(Y ~ X2c*X3c + (X2c+X3c||id), XL) ) # t = 0.1640710
summary( lmer(Y ~ X2c*X3c + (X2c*X3c|id), XL) ) # t = 0.1641765
anova( lmer(Y ~ X2c*X3c + (X2c*X3c|id), XL), ddf = "Kenward-Roger" ) # t = 0.1643168
summary( lmer(Y ~ X2c*X3c + (X2c*X3c|id), XL, REML = FALSE) ) # t = 0.1645303
summary( lmer(Y ~ X2c*X3c + (X2c*X3c||id), XL) ) # t = 0.1640704
# X1:X2:X3 3-way interaction (for all the data)
summary( lm(X1.X2.X3 ~ 1, X) ) # t = 46.50177
summary( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c - X1c:X2c:X3c|id), XL) ) # t = 49.0317599
anova( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c - X1c:X2c:X3c|id), XL), ddf = "Kenward-Roger" ) # t = 49.03176
summary( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c - X1c:X2c:X3c|id), XL, REML = FALSE) ) # t = 49.2677606
summary( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c - X1c:X2c:X3c||id), XL) ) # t = 46.5193774 close match
summary( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c|id), XL) ) # unidentifiable
summary( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c|id), XL,
control = lmerControl(check.nobs.vs.nRE="ignore")) ) # t = 46.5148684 close match
As you can see above, none of the lmer estimates exactly match the lm estimates, although some results are very similar and probably differ only for numerical/computational reasons. The gap between the two estimation approaches is especially large for the X2:X3 2-way interaction (for all the data).
My questions are whether there is a way to obtain exactly the same results with both approaches, and what the correct way is to carry out this analysis with lmer (even if it may not match the lm results).
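For reference, here is a sketch of the classical univariate repeated-measures ANOVA via aov (which is, as far as I understand, what the univariate tests of SPSS's GLM for repeated measures correspond to); idf, X1f, X2f and X3f are helper columns I create just for this check, and for these 1-df within-subject effects the F values should equal the squared t values from the lm fits above:
# Explicit factors for aov (helper columns, not used elsewhere)
XL[, idf := factor(id)]
XL[, X1f := factor(X1)]
XL[, X2f := factor(X2)]
XL[, X3f := factor(X3)]
# Univariate RM-ANOVA: each within-subject effect is tested against its own
# effect-by-subject stratum; F should equal the square of the matching lm t
summary( aov(Y ~ X1f*X2f*X3f + Error(idf/(X1f*X2f*X3f)), data = XL) )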
Bonus:
I noticed that the t value associated with the 3-way interaction is affected by how the factors are coded, which seems odd to me:
summary( lmer(Y ~ X1*X2*X3 + (X1*X2*X3 - X1:X2:X3||id), XL) ) # t = 48.36
summary( lmer(Y ~ X1c*X2c*X3c + (X1c*X2c*X3c - X1c:X2c:X3c||id), XL) ) # t = 56.52
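I suspect (just my understanding, sketched below) that this happens because the double-bar syntax expands every random term as 0 + term, and for factor/character predictors that expansion yields one indicator column per level rather than the single +1/-1 column produced by the numeric codes, so the two calls do not actually specify the same random-effects structure:
# With numeric codes each random term contributes a single column; with
# factor/character codes, "0 + term" gives one indicator column per level
head( model.matrix(~ 0 + X1c, XL) )  # one column of +1/-1
head( model.matrix(~ 0 + X1, XL) )   # two indicator columns (X1a, X1b)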