ls-means.Rd
Compute LS-means (least-squares means, also known as population means or marginal means) for a range of model types.
LSmeans(object, effect = NULL, at = NULL, level = 0.95, ...)
# S3 method for default
LSmeans(object, effect = NULL, at = NULL, level = 0.95, ...)
# S3 method for lmerMod
LSmeans(object, effect = NULL, at = NULL, level = 0.95, adjust.df = TRUE, ...)
popMeans(object, effect = NULL, at = NULL, level = 0.95, ...)
# S3 method for default
popMeans(object, effect = NULL, at = NULL, level = 0.95, ...)
# S3 method for lmerMod
popMeans(object, effect = NULL, at = NULL, level = 0.95, adjust.df = TRUE, ...)
object: Model object.
effect: A vector of variables. For each configuration of these, the estimate will be calculated.
at: A list of values of covariates (including levels of some factors) to be used in the calculations.
level: The level of the (asymptotic) confidence interval.
...: Additional arguments; currently not used.
adjust.df: Should denominator degrees of freedom be adjusted?
A data frame with results from computing the contrasts.
There are restrictions on the formulas allowed in the model object. For example, a formula containing y ~ log(x) will cause an error. Instead, one must define the variable logx = log(x) in the data and use y ~ logx.
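A minimal sketch of the workaround (the Puromycin example below does the same on real data; the data frame dd and its columns are hypothetical and only for illustration):

## Sketch: precompute the transformed covariate instead of
## transforming inside the formula
dd <- data.frame(x=rexp(20), g=factor(rep(1:2, 10)))
dd$y <- rnorm(20)
## lm(y ~ g + log(x), data=dd) would make LSmeans fail
dd$logx <- log(dd$x)
mm <- lm(y ~ g + logx, data=dd)
LSmeans(mm, effect="g")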
LSmeans and popMeans are synonymous. Some of the code has been inspired by the lsmeans package.
Notice that LSmeans and LE_matrix fail if the model formula contains an offset (as one would have in connection with, e.g., Poisson regression).
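A hedged sketch of the kind of model this rules out (again with a hypothetical data frame dd):

## Sketch: an offset term in the formula makes LSmeans / LE_matrix fail
dd <- data.frame(n=rpois(20, 5), expo=runif(20, 1, 2),
                 g=factor(rep(1:2, 10)))
mp <- glm(n ~ g + offset(log(expo)), family=poisson, data=dd)
## LSmeans(mp, effect="g")  ## fails because of the offset term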
## Two way anova:
data(warpbreaks)
m0 <- lm(breaks ~ wool + tension, data=warpbreaks)
m1 <- lm(breaks ~ wool * tension, data=warpbreaks)
LSmeans(m0)
#> estimate std.error statistic df p.value
#> [1,] 28.1481 1.5809 17.8052 50.0000 0
LSmeans(m1)
#> estimate std.error statistic df p.value
#> [1,] 28.1481 1.4888 18.9068 48.0000 0
## same as:
K <- LE_matrix(m0);K
#> (Intercept) woolB tensionM tensionH
#> [1,] 1 0.5 0.3333333 0.3333333
linest(m0, K)
#> estimate std.error statistic df p.value
#> [1,] 28.1481 1.5809 17.8052 50.0000 0
K <- LE_matrix(m1);K
#> (Intercept) woolB tensionM tensionH woolB:tensionM woolB:tensionH
#> [1,] 1 0.5 0.3333333 0.3333333 0.1666667 0.1666667
linest(m1, K)
#> estimate std.error statistic df p.value
#> [1,] 28.1481 1.4888 18.9068 48.0000 0
LE_matrix(m0, effect="wool")
#> (Intercept) woolB tensionM tensionH
#> [1,] 1 0 0.3333333 0.3333333
#> [2,] 1 1 0.3333333 0.3333333
LSmeans(m0, effect="wool")
#> estimate std.error statistic df p.value
#> [1,] 31.0370 2.2357 13.8824 50.0000 0
#> [2,] 25.2593 2.2357 11.2981 50.0000 0
LE_matrix(m1, effect="wool")
#> (Intercept) woolB tensionM tensionH woolB:tensionM woolB:tensionH
#> [1,] 1 0 0.3333333 0.3333333 0.0000000 0.0000000
#> [2,] 1 1 0.3333333 0.3333333 0.3333333 0.3333333
LSmeans(m1, effect="wool")
#> estimate std.error statistic df p.value
#> [1,] 31.0370 2.1055 14.7412 48.0000 0
#> [2,] 25.2593 2.1055 11.9970 48.0000 0
LE_matrix(m0, effect=c("wool", "tension"))
#> (Intercept) woolB tensionM tensionH
#> [1,] 1 0 0 0
#> [2,] 1 1 0 0
#> [3,] 1 0 1 0
#> [4,] 1 1 1 0
#> [5,] 1 0 0 1
#> [6,] 1 1 0 1
LSmeans(m0, effect=c("wool", "tension"))
#> estimate std.error statistic df p.value
#> [1,] 39.2778 3.1618 12.4227 50.0000 0
#> [2,] 33.5000 3.1618 10.5953 50.0000 0
#> [3,] 29.2778 3.1618 9.2599 50.0000 0
#> [4,] 23.5000 3.1618 7.4325 50.0000 0
#> [5,] 24.5556 3.1618 7.7664 50.0000 0
#> [6,] 18.7778 3.1618 5.9390 50.0000 0
LE_matrix(m1, effect=c("wool", "tension"))
#> (Intercept) woolB tensionM tensionH woolB:tensionM woolB:tensionH
#> [1,] 1 0 0 0 0 0
#> [2,] 1 1 0 0 0 0
#> [3,] 1 0 1 0 0 0
#> [4,] 1 1 1 0 1 0
#> [5,] 1 0 0 1 0 0
#> [6,] 1 1 0 1 0 1
LSmeans(m1, effect=c("wool", "tension"))
#> estimate std.error statistic df p.value
#> [1,] 44.5556 3.6468 12.2178 48.0000 0
#> [2,] 28.2222 3.6468 7.7390 48.0000 0
#> [3,] 24.0000 3.6468 6.5812 48.0000 0
#> [4,] 28.7778 3.6468 7.8913 48.0000 0
#> [5,] 24.5556 3.6468 6.7335 48.0000 0
#> [6,] 18.7778 3.6468 5.1492 48.0000 0
## Regression; two parallel regression lines:
data(Puromycin)
m0 <- lm(rate ~ state + log(conc), data=Puromycin)
## Cannot use LSmeans / LE_matrix here because of
## the log-transformation. Instead we must do:
Puromycin$lconc <- log( Puromycin$conc )
m1 <- lm(rate ~ state + lconc, data=Puromycin)
LE_matrix(m1)
#> (Intercept) stateuntreated lconc
#> [1,] 1 0.5 -1.905247
LSmeans(m1)
#> estimate std.error statistic df p.value
#> [1,] 126.2787 2.3738 53.1965 20.0000 0
LE_matrix(m1, effect="state")
#> (Intercept) stateuntreated lconc
#> [1,] 1 0 -1.905247
#> [2,] 1 1 -1.905247
LSmeans(m1, effect="state")
#> estimate std.error statistic df p.value
#> [1,] 138.8689 3.2868 42.2509 20.0000 0
#> [2,] 113.6884 3.4332 33.1140 20.0000 0
LE_matrix(m1, effect="state", at=list(lconc=3))
#> (Intercept) stateuntreated lconc
#> [1,] 1 0 3
#> [2,] 1 1 3
LSmeans(m1, effect="state", at=list(lconc=3))
#> estimate std.error statistic df p.value
#> [1,] 298.6019 9.3499 31.9363 20.0000 0
#> [2,] 273.4214 9.6975 28.1950 20.0000 0
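## The 'at' argument can also fix factor levels (see the argument
## description above). A hedged sketch, reusing m1; the covariate lconc
## then defaults to its average value in the data:
LSmeans(m1, at=list(state="treated"))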
## Non-estimable contrasts
## ## Make balanced dataset
dat.bal <- expand.grid(list(AA=factor(1:2), BB=factor(1:3),
CC=factor(1:3)))
dat.bal$y <- rnorm(nrow(dat.bal))
## ## Make unbalanced dataset
# 'BB' is nested within 'CC' so BB=1 is only found when CC=1
# and BB=2,3 are found in each CC=2,3,4
dat.nst <- dat.bal
dat.nst$CC <- factor(c(1, 1, 2, 2, 2, 2, 1, 1, 3, 3,
                       3, 3, 1, 1, 4, 4, 4, 4))
mod.bal <- lm(y ~ AA + BB * CC, data=dat.bal)
mod.nst <- lm(y ~ AA + BB : CC, data=dat.nst)
LSmeans(mod.bal, effect=c("BB", "CC"))
#> estimate std.error statistic df p.value
#> [1,] -0.62454 0.61163 -1.02110 8.00000 0.3371
#> [2,] -0.15477 0.61163 -0.25304 8.00000 0.8066
#> [3,] 0.38479 0.61163 0.62911 8.00000 0.5468
#> [4,] -0.57044 0.61163 -0.93265 8.00000 0.3783
#> [5,] 0.06340 0.61163 0.10366 8.00000 0.9200
#> [6,] 1.27585 0.61163 2.08598 8.00000 0.0705
#> [7,] -0.71744 0.61163 -1.17299 8.00000 0.2745
#> [8,] 1.55610 0.61163 2.54418 8.00000 0.0345
#> [9,] 0.92063 0.61163 1.50521 8.00000 0.1707
LSmeans(mod.nst, effect=c("BB", "CC"))
#> estimate std.error statistic df p.value
#> [1,] -0.63747 0.31643 -2.01459 10.00000 0.0716
#> [2,] NA NA NA NA NA
#> [3,] NA NA NA NA NA
#> [4,] NA NA NA NA NA
#> [5,] -0.15477 0.54807 -0.28239 10.00000 0.7834
#> [6,] 0.38479 0.54807 0.70208 10.00000 0.4986
#> [7,] NA NA NA NA NA
#> [8,] 0.06340 0.54807 0.11568 10.00000 0.9102
#> [9,] 1.27585 0.54807 2.32791 10.00000 0.0422
#> [10,] NA NA NA NA NA
#> [11,] 1.55610 0.54807 2.83925 10.00000 0.0176
#> [12,] NA NA NA NA NA
LSmeans(mod.nst, at=list(BB=1, CC=1))
#> estimate std.error statistic df p.value
#> [1,] -0.63747 0.31643 -2.01459 10.00000 0.0716
LSmeans(mod.nst, at=list(BB=1, CC=2))
#> estimate std.error statistic df p.value
#> [1,] NA NA NA NA NA
## Above: NA's are correct; the combination BB=1, CC=2 does not occur in
## the nested data, so this LS-mean is not an estimable function
if (require(lme4)) {
warp.mm <- lmer(breaks ~ -1 + tension + (1|wool), data=warpbreaks)
LSmeans(warp.mm, effect="tension")
class(warp.mm)
fixef(warp.mm)
coef(summary(warp.mm))
vcov(warp.mm)
if (require(pbkrtest))
vcovAdj(warp.mm)
}
#> Loading required package: lme4
#> Loading required package: Matrix
#> Loading required package: pbkrtest
#> 3 x 3 Matrix of class "dgeMatrix"
#> tensionL tensionM tensionH
#> tensionL 13.344136 5.846482 5.846482
#> tensionM 5.846482 13.344136 5.846482
#> tensionH 5.846482 5.846482 13.344136
LSmeans(warp.mm, effect="tension")
#> estimate std.error statistic df p.value
#> [1,] 36.3889 3.6530 9.9615 2.5383 0.0042
#> [2,] 26.3889 3.6530 7.2240 2.5383 0.0094
#> [3,] 21.6667 3.6530 5.9313 2.5383 0.0151
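## The 'adjust.df' argument of the lmerMod method controls whether the
## denominator degrees of freedom are adjusted (via pbkrtest, judging
## from the output above). A minimal sketch, reusing warp.mm:
LSmeans(warp.mm, effect="tension", adjust.df=TRUE)   ## adjusted df (default)
LSmeans(warp.mm, effect="tension", adjust.df=FALSE)  ## unadjusted df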