10  Physics Task Accuracy

The tables below show that there was no effect of mindfulness training on physics task accuracy (Supplementary Table 10.1) or learning on the PFL (Supplementary Table 10.2).

10.1 Model Specification

Show/Hide Code
# Accuracy by problem solving task for parts 1-3

# Per-participant mean accuracy for the three problem-solving parts
# (the PFL part is handled separately below). The rating columns are
# spread wide so each perception becomes its own column, then Score is
# averaged within every participant/design-cell combination.
data_accuracy <- data_rq1 |>
  filter(Part != "PFL") |>
  pivot_wider(names_from = perception, values_from = rating) |>
  group_by(
    Participant, Condition, Gender, Cohort, Timepoint,
    Semester_Week, Test_Version, Part, Baseline_Threat, EMA_Threat
  ) |>
  # .groups = "drop" returns an ungrouped tibble for downstream nesting
  summarise(Score = mean(Score), .groups = "drop")

# Fit the preregistered mixed-effects model variants separately for each
# problem-solving part. The data are nested by Part so each model
# specification can be mapped over the per-part data frames; a random
# intercept per Participant accounts for repeated measures.
models_accuracy <- data_accuracy %>%
  group_by(Part) %>%
  nest() %>% # always call with parentheses (tidyverse style)
  mutate(
    # Models with all covariates included
    mod_full = map(
      data,
      ~ lmer(
        Score ~
          Cohort +
            Semester_Week +
            Test_Version +
            Baseline_Threat +
            Gender +
            Timepoint * Condition +
            (1 | Participant),
        data = .x
      )
    ),
    # Models with no covariates
    mod_cov_removed = map(
      data,
      ~ lmer(
        Score ~ Timepoint * Condition * Gender + (1 | Participant),
        data = .x
      )
    ),
    # Models with gender interaction
    mod_gender_interact = map(
      data,
      ~ lmer(
        Score ~
          Cohort +
            Semester_Week +
            Test_Version +
            Baseline_Threat +
            Timepoint * Condition * Gender +
            (1 | Participant),
        data = .x
      )
    ),
    # Empty means models (only random effects)
    mod_empty_means = map(
      data,
      ~ lmer(
        Score ~ (1 | Participant),
        data = .x
      )
    ),
    # Likelihood-ratio comparison of the 2-way vs 3-way interaction
    # models; map2 is the idiomatic two-input map (was pmap over a list)
    comparison = map2(mod_full, mod_gender_interact, anova)
  ) %>%
  name_list_columns() # Name the list columns

# Accuracy on the PFL Task

# Preparation-for-Future-Learning items only, reshaped wide and recoded
# for the logistic model below.
data_pfl <- data_rq1 |>
  filter(Part == "PFL") |>
  pivot_wider(names_from = perception, values_from = rating) |>
  mutate(
    # Raw question numbers are offset by 10 (presumably items 11-12);
    # shift to 1-2 and treat as a factor
    Question = factor(Question - 10),
    # Binary correctness coded as a factor for the binomial GLMM
    Score = factor(Score)
  )

# Logistic mixed-effects model for PFL correctness: covariates plus the
# Question x Condition interaction, with a random intercept per
# Participant. bobyqa is used to aid convergence.
model_pfl <- glmer(
  Score ~ Cohort + Semester_Week + Baseline_Threat + Gender +
    Question * Condition + (1 | Participant),
  data = data_pfl,
  family = binomial, # equivalent to family = "binomial"
  control = glmerControl(optimizer = "bobyqa")
)

10.2 Preregistered Hypotheses 1 and 4

Show/Hide Code
# tab_model(
#   models_accuracy$mod_full$Quantitative,
#   models_accuracy$mod_full$Categorization,
#   models_accuracy$mod_full$Qualitative,
#   show.se = T,
#   string.se = "SE",
#   show.ci = F,
#   dv.labels = c("Quantitative", "Categorization", "Qualitative") |>
#     (\(x) paste0("Part", 1:3, ": ", x))()
# )

# Render Supplementary Table 10.1: the three full accuracy models,
# labelled "Part1: Quantitative" ... "Part3: Qualitative".
handle_model_print(
  list(
    models_accuracy$mod_full$Quantitative,
    models_accuracy$mod_full$Categorization,
    models_accuracy$mod_full$Qualitative
  ),
  # paste0 vectorizes over parts 1:3 and the matching labels
  paste0("Part", 1:3, ": ", c("Quantitative", "Categorization", "Qualitative")),
  n_models = 3,
  is_lmer = TRUE,
  raneff_rownum = 11
)
Supplementary Table 10.1: Results from Mixed Effects Models Testing Preregistered Hypotheses 1 and 4: Effects of Mindfulness Training on Problem Solving Accuracy
  Part1: Quantitative Part2: Categorization Part3: Qualitative
Predictors Estimates SE p Estimates SE p Estimates SE p
(Intercept) 0.07 0.01 <0.001 0.38 0.02 <0.001 0.50 0.02 <0.001
Cohort [Cohort 2] 0.01 0.06 0.902 -0.11 0.08 0.178 -0.10 0.08 0.222
Cohort [Cohort 3] 0.07 0.05 0.213 -0.03 0.07 0.630 -0.08 0.07 0.288
Semester Week 0.01 0.01 0.339 -0.00 0.01 0.750 0.01 0.01 0.675
Test Version [B] 0.03 0.02 0.092 -0.10 0.02 <0.001 0.21 0.02 <0.001
Baseline Threat -0.04 0.01 <0.001 -0.00 0.01 0.766 -0.03 0.01 0.008
Gender [Women or Non-binary] 0.02 0.02 0.344 -0.01 0.03 0.774 -0.06 0.03 0.033
Timepoint [Posttest] 0.01 0.02 0.545 -0.03 0.02 0.205 0.07 0.02 0.003
Condition [Mindfulness] 0.02 0.03 0.385 0.04 0.04 0.292 -0.06 0.04 0.083
Timepoint [Posttest] × Condition [Mindfulness] -0.02 0.04 0.558 -0.05 0.04 0.277 0.04 0.05 0.369
Random Effects
σ2 0.03 0.04 0.04
τ00 0.00 Participant 0.01 Participant 0.01 Participant
ICC 0.04 0.20 0.15
N 149 Participant 149 Participant 149 Participant
Observations 295 298 298
Marginal R2 / Conditional R2 0.095 / 0.135 0.072 / 0.259 0.274 / 0.380
Note

Supplementary Table 10.1: The estimates for the intercept represent the overall mean score (percent correct) and standard errors for each of the problem solving performance outcomes at baseline. The estimate for timepoint represents the change in the dependent variable from baseline to posttest across conditions. P-values below .05 are indicated by bold font.

10.3 Preregistered Hypothesis 5

Show/Hide Code
# tab_model(
#   model_pfl,
#   show.se = T,
#   string.se = "SE",
#   show.ci = F,
#   dv.labels = "PFL Correctness"
# )

# Render Supplementary Table 10.2: the single PFL logistic model,
# reporting odds ratios rather than raw estimates.
handle_model_print(
  list(model_pfl),
  "PFL Correctness",
  n_models = 1,
  is_lmer = TRUE,
  raneff_rownum = 10,
  str_estimate = "Odds Ratios"
)
Supplementary Table 10.2: Results from Logistic Mixed Effects Model Testing Preregistered Hypothesis 5: Effects of Mindfulness Training on Learning During the Preparation for Future Learning Task
  PFL Correctness
Predictors Odds Ratios SE p
(Intercept) 0.23 0.06 <0.001
Cohort [Cohort 2] 1.13 1.04 0.894
Cohort [Cohort 3] 0.51 0.41 0.403
Semester Week 1.10 0.16 0.521
Baseline Threat 0.86 0.11 0.234
Gender [Women or Non-binary] 0.37 0.13 0.005
Question [2] 4.87 1.59 <0.001
Condition [Mindfulness] 1.42 0.64 0.433
Question [2] × Condition [Mindfulness] 0.91 0.52 0.873
Random Effects
σ2 3.29
τ00 Participant 0.62
ICC 0.16
N Participant 149
Observations 298
Marginal R2 / Conditional R2 0.235 / 0.356
Note

Supplementary Table 10.2: The odds ratio for the intercept term represents the odds of getting question 1 correct compared to incorrect. The odds ratio for the question × condition interaction term represents the difference in odds of getting question 2 correct between conditions, above and beyond any condition differences on question 1 and overall differences on question 2, compared to question 1. P-values below .05 are indicated by bold font.