Skip to content

Commit 7b2aab8

Browse files
committed
fix
1 parent db582dc commit 7b2aab8

File tree

5 files changed

+98
-34
lines changed

5 files changed

+98
-34
lines changed

DESCRIPTION

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
Type: Package
22
Package: modelbased
33
Title: Estimation of Model-Based Predictions, Contrasts and Means
4-
Version: 0.13.1
4+
Version: 0.13.1.1
55
Authors@R:
66
c(person(given = "Dominique",
77
family = "Makowski",

NEWS.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,9 @@
1+
# modelbased (devel)
2+
3+
## Bug fixes
4+
5+
* Fixed issue in `estimate_slope()` when `p_adjust = "esarey"`.
6+
17
# modelbased 0.13.1
28

39
## Changes

R/p_adjust.R

Lines changed: 50 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -24,13 +24,22 @@
2424
# we have to print a different error message.
2525
emmeans_options <- c("scheffe", "mvt", "dunnettx")
2626

27-
all_methods <- c(tolower(stats::p.adjust.methods), emmeans_options, "tukey", "sidak", "esarey", "sup-t")
27+
all_methods <- c(
28+
tolower(stats::p.adjust.methods),
29+
emmeans_options,
30+
"tukey",
31+
"sidak",
32+
"esarey",
33+
"sup-t"
34+
)
2835
insight::validate_argument(p_adjust, all_methods)
2936

3037
# emmeans methods? Then tell user
3138
if (p_adjust %in% emmeans_options) {
3239
insight::format_error(paste0(
33-
"`p_adjust = \"", p_adjust, "\"` is only available when `backend = \"emmeans\"."
40+
"`p_adjust = \"",
41+
p_adjust,
42+
"\"` is only available when `backend = \"emmeans\"."
3443
))
3544
}
3645

@@ -96,12 +105,16 @@
96105
# columns.
97106
coef_column <- intersect(c(.valid_coefficient_names(), "estimate"), colnames(params))[1]
98107
if (is.na(coef_column)) {
99-
insight::format_alert("Could not find coefficient column to apply `sup-t` adjustment.")
108+
insight::format_alert(
109+
"Could not find coefficient column to apply `sup-t` adjustment."
110+
)
100111
return(params)
101112
}
102113
se_column <- intersect(c("SE", "std.error"), colnames(params))[1]
103114
if (is.na(se_column)) {
104-
insight::format_alert("Could not extract standard errors to apply `sup-t` adjustment.")
115+
insight::format_alert(
116+
"Could not extract standard errors to apply `sup-t` adjustment."
117+
)
105118
return(params)
106119
}
107120
p_column <- intersect(c("p", "p.value"), colnames(params))[1]
@@ -112,7 +125,9 @@
112125
ci_low_column <- intersect(c("CI_low", "conf.low"), colnames(params))[1]
113126
ci_high_column <- intersect(c("CI_high", "conf.high"), colnames(params))[1]
114127
if (is.na(ci_low_column) || is.na(ci_high_column)) {
115-
insight::format_alert("Could not extract confidence intervals to apply `sup-t` adjustment.")
128+
insight::format_alert(
129+
"Could not extract confidence intervals to apply `sup-t` adjustment."
130+
)
116131
return(params)
117132
}
118133
df_column <- intersect(c("df", "df_error"), colnames(params))[1]
@@ -122,18 +137,30 @@
122137
}
123138
# calculate updated confidence interval level, based on simultaneous
124139
# confidence intervals (https://onlinelibrary.wiley.com/doi/10.1002/jae.2656)
125-
crit <- mvtnorm::qmvt(ci_level, df = params[[df_column]][1], tail = "both.tails", corr = vc)$quantile
140+
crit <- mvtnorm::qmvt(
141+
ci_level,
142+
df = params[[df_column]][1],
143+
tail = "both.tails",
144+
corr = vc
145+
)$quantile
126146
# update confidence intervals
127147
params[[ci_low_column]] <- params[[coef_column]] - crit * params[[se_column]]
128148
params[[ci_high_column]] <- params[[coef_column]] + crit * params[[se_column]]
129149
# update p-values
130150
for (i in 1:nrow(params)) {
131-
params[[p_column]][i] <- 1 - mvtnorm::pmvt(
132-
lower = rep(-abs(stats::qt(params[[p_column]][i] / 2, df = params[[df_column]][i])), nrow(vc)),
133-
upper = rep(abs(stats::qt(params[[p_column]][i] / 2, df = params[[df_column]][i])), nrow(vc)),
134-
corr = vc,
135-
df = params[[df_column]][i]
136-
)
151+
params[[p_column]][i] <- 1 -
152+
mvtnorm::pmvt(
153+
lower = rep(
154+
-abs(stats::qt(params[[p_column]][i] / 2, df = params[[df_column]][i])),
155+
nrow(vc)
156+
),
157+
upper = rep(
158+
abs(stats::qt(params[[p_column]][i] / 2, df = params[[df_column]][i])),
159+
nrow(vc)
160+
),
161+
corr = vc,
162+
df = params[[df_column]][i]
163+
)
137164
}
138165
# clean up - remove temporary column
139166
params[[".sup_df"]] <- NULL
@@ -145,15 +172,19 @@
145172
.p_adjust_esarey <- function(x) {
146173
# only for slopes
147174
if (!inherits(x, c("estimate_slopes", "marginaleffects_slopes"))) {
148-
insight::format_error("The `esarey` p-value adjustment is only available for Johnson-Neyman intervals, i.e. when calling `estimate_slopes()` with an interaction term of two numeric predictors.") # nolint
175+
insight::format_error(
176+
"The `esarey` p-value adjustment is only available for Johnson-Neyman intervals, i.e. when calling `estimate_slopes()` with an interaction term of two numeric predictors."
177+
) # nolint
149178
}
150179
# get names of interaction terms
151180
pred <- attributes(x)$trend
152181
mod <- attributes(x)$by
153182

154183
# check for valid values - all must be numeric
155184
if (!all(vapply(attributes(x)$datagrid[c(pred, mod)], is.numeric, logical(1)))) {
156-
insight::format_error("The `esarey` p-value adjustment is only available for Johnson-Neyman intervals, i.e. when calling `estimate_slopes()` with an interaction term of two numeric predictors.") # nolint
185+
insight::format_error(
186+
"The `esarey` p-value adjustment is only available for Johnson-Neyman intervals, i.e. when calling `estimate_slopes()` with an interaction term of two numeric predictors."
187+
) # nolint
157188
}
158189

159190
int <- paste0(pred, ":", mod)
@@ -186,14 +217,17 @@
186217
# produces a sequence of marginal effects
187218
marginal_effects <- beta_pred + beta_int * range_sequence
188219
# SEs of those marginal effects
189-
me_ses <- sqrt(vcov_pred + (range_sequence^2) * vcov_int + 2 * range_sequence * vcov_pred_int)
220+
me_ses <- sqrt(
221+
vcov_pred + (range_sequence^2) * vcov_int + 2 * range_sequence * vcov_pred_int
222+
)
190223

191224
# t-values across range of marginal effects
192225
statistic <- marginal_effects / me_ses
193226
# degrees of freedom
194227
dof <- insight::get_df(model, type = "wald")
195228
# Get the minimum p values used in the adjustment
196-
pvalues <- 2 * pmin(stats::pt(statistic, df = dof), (1 - stats::pt(statistic, df = dof)))
229+
pvalues <- 2 *
230+
pmin(stats::pt(statistic, df = dof), (1 - stats::pt(statistic, df = dof)))
197231
# Multipliers
198232
multipliers <- seq_along(marginal_effects) / length(marginal_effects)
199233
# Order the pvals

R/summary.R

Lines changed: 41 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ summary.estimate_slopes <- function(object, verbose = TRUE, ...) {
44
by <- attributes(object)$by
55

66
if (verbose && nrow(out) < 50) {
7-
insight::format_alert("There might be too few data to accurately determine intervals. Consider setting `length = 100` (or larger) in your call to `estimate_slopes()`.") # nolint
7+
insight::format_alert(
8+
"There might be too few data to accurately determine intervals. Consider setting `length = 100` (or larger) in your call to `estimate_slopes()`."
9+
) # nolint
810
}
911

1012
# Add "Confidence" col based on the sig index present in the data
@@ -22,6 +24,10 @@ summary.estimate_slopes <- function(object, verbose = TRUE, ...) {
2224
out <- .estimate_slope_parts(out, by)
2325
}
2426

27+
if (anyNA(out$Confidence) && verbose) {
28+
insight::format_warning("Significance could not be determined for some intervals.")
29+
}
30+
2531
attributes(out) <- utils::modifyList(attributes(object), attributes(out))
2632
class(out) <- c("summary_estimate_slopes", "data.frame")
2733
attr(out, "table_title") <- c("Johnson-Neymann Intervals", "blue")
@@ -40,15 +46,23 @@ summary.reshape_grouplevel <- function(object, ...) {
4046

4147
# Utilities ===============================================================
4248

43-
4449
.estimate_slope_parts <- function(out, by) {
4550
# mark all "changes" from negative to positive and vice versa
4651
index <- 1
4752
out$switch <- index
4853
index <- index + 1
4954

5055
for (i in 2:nrow(out)) {
51-
if (out$Direction[i] != out$Direction[i - 1] || out$Confidence[i] != out$Confidence[i - 1]) {
56+
# we only proceed if we have non-missing values
57+
do_switch <- (!is.na(out$Direction[i - 1]) & !is.na(out$Direction[i])) &&
58+
out$Direction[i] != out$Direction[i - 1]
59+
60+
if (!do_switch) {
61+
do_switch <- (!is.na(out$Confidence[i - 1]) & !is.na(out$Confidence[i])) &&
62+
out$Confidence[i] != out$Confidence[i - 1]
63+
}
64+
65+
if (do_switch) {
5266
out$switch[i:nrow(out)] <- index # styler: off
5367
index <- index + 1
5468
}
@@ -57,14 +71,17 @@ summary.reshape_grouplevel <- function(object, ...) {
5771
# split into "switches"
5872
parts <- split(out, out$switch)
5973

60-
do.call(rbind, lapply(parts, function(i) {
61-
data.frame(
62-
Start = i[[by]][1],
63-
End = i[[by]][nrow(i)],
64-
Direction = i$Direction[1],
65-
Confidence = i$Confidence[1]
66-
)
67-
}))
74+
do.call(
75+
rbind,
76+
lapply(parts, function(i) {
77+
data.frame(
78+
Start = i[[by]][1],
79+
End = i[[by]][nrow(i)],
80+
Direction = i$Direction[1],
81+
Confidence = i$Confidence[1]
82+
)
83+
})
84+
)
6885
}
6986

7087

@@ -83,18 +100,27 @@ summary.reshape_grouplevel <- function(object, ...) {
83100

84101
if (confidence == "auto") {
85102
# TODO: make sure all of these work
86-
if ("BF" %in% names(x)) confidence <- "BF"
87-
if ("p" %in% names(x)) confidence <- "p"
103+
if ("BF" %in% names(x)) {
104+
confidence <- "BF"
105+
}
106+
if ("p" %in% names(x)) {
107+
confidence <- "p"
108+
}
88109
if ("pd" %in% names(x)) confidence <- "pd"
89110
}
90111

91-
switch(confidence,
112+
switch(
113+
confidence,
92114
p = tools::toTitleCase(effectsize::interpret_p(x$p, ...)),
93115
BF = tools::toTitleCase(effectsize::interpret_bf(x$BF, ...)),
94116
pd = tools::toTitleCase(effectsize::interpret_pd(x$pd, ...)),
95117
{
96118
# Based on CI
97-
out <- ifelse((x$CI_high < 0 & x$CI_low < 0) | (x$CI_high > 0 & x$CI_low > 0), "Significant", "Uncertain")
119+
out <- ifelse(
120+
(x$CI_high < 0 & x$CI_low < 0) | (x$CI_high > 0 & x$CI_low > 0),
121+
"Significant",
122+
"Uncertain"
123+
)
98124
factor(out, levels = c("Uncertain", "Significant"))
99125
}
100126
)

vignettes/introduction_comparisons_3.Rmd

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,12 +270,10 @@ plot(pr) + ggplot2::facet_wrap(~Illiteracy)
270270
For Johnson-Neyman intervals, which arise from interactions between two numeric predictors, we are essentially conducting multiple comparisons across the values of the moderator. To account for this, p-values can be adjusted to avoid an inflation of type-I errors. For `estimate_slopes()`, you can use the `p_adjust` argument for this purpose. While common methods like `"holm"` or `"bonferroni"` are available, for the specific case of Johnson-Neyman intervals, the `"esarey"` or `"sup-t"` adjustments are particularly recommended. The `"sup-t"` method, for instance, computes simultaneous confidence bands (Montiel Olea & Plagborg-Møller, 2019), providing a more rigorous test for the interval.
271271

272272
```{r}
273-
# we will force to calculate slopes at 200 values for "Illiteracy" using `length`
274273
slopes <- estimate_slopes(
275274
m_mod,
276275
"Murder",
277276
by = "Illiteracy",
278-
length = 200,
279277
p_adjust = "esarey"
280278
)
281279
summary(slopes)

0 commit comments

Comments
 (0)