diff --git a/03-Study-1.qmd b/03-Study-1.qmd index 93ef4c5..509dc84 100644 --- a/03-Study-1.qmd +++ b/03-Study-1.qmd @@ -6,9 +6,15 @@ knitr::opts_chunk$set( message = FALSE, warning = FALSE ) +library(dplyr) +library(tidyr) +library(purrr) +library(stringr) +library(forcats) +library(readr) + library(Hmisc) # summarize namespace overlaps with dplyr library(here) -library(tidyverse) library(GGally) # Corregiogram library(corrr) library(kableExtra) @@ -43,54 +49,54 @@ s1e_df <- rio::import(here("data_public/Study1e_public.rds")) ## Preparing the table of measures ----------------------------------------- # get data s1_measures_path <- here("data_public", "Study1_measures.csv") -s1_measures_tbl <- read_csv(s1_measures_path) +s1_measures_tbl <- readr::read_csv(s1_measures_path) # add ronames -s1_measures_tbl <- s1_measures_tbl %>% mutate(id = row_number()) +s1_measures_tbl <- s1_measures_tbl %>% dplyr::mutate(id = dplyr::row_number()) # abbreviate the labels s1_measures_tbl <- s1_measures_tbl %>% - mutate( - Validity = str_replace(Validity, "Convergent", "Con."), - Validity = str_replace(Validity, "Discriminant", "Dis.") + dplyr::mutate( + Validity = stringr::str_replace(Validity, "Convergent", "Con."), + Validity = stringr::str_replace(Validity, "Discriminant", "Dis.") ) # Starting positions for packing pack_starts <- s1_measures_tbl %>% - mutate(rownum = row_number()) %>% - group_by(Study, `Main Measure`) %>% - mutate(Submeasure = row_number()) %>% - mutate( - start_num = case_when(Submeasure == 1 ~ rownum), - end_num = case_when(Submeasure == n() ~ rownum) + dplyr::mutate(rownum = dplyr::row_number()) %>% + dplyr::group_by(Study, `Main Measure`) %>% + dplyr::mutate(Submeasure = dplyr::row_number()) %>% + dplyr::mutate( + start_num = dplyr::case_when(Submeasure == 1 ~ rownum), + end_num = dplyr::case_when(Submeasure == dplyr::n() ~ rownum) ) %>% - ungroup() %>% - fill(start_num, .direction = "down") %>% - fill(end_num, .direction = "up") %>% - filter(Submeasure == 1) 
%>% - mutate( + dplyr::ungroup() %>% + tidyr::fill(start_num, .direction = "down") %>% + tidyr::fill(end_num, .direction = "up") %>% + dplyr::filter(Submeasure == 1) %>% + dplyr::mutate( need_packing = (start_num != end_num), - `Subscale (if any)` = case_when(need_packing == FALSE ~ `Main Measure`) + `Subscale (if any)` = dplyr::case_when(need_packing == FALSE ~ `Main Measure`) ) %>% - mutate(labels = case_when(need_packing == TRUE ~ `Main Measure`, TRUE ~ " ")) %>% - mutate(intervals = end_num - start_num + 1) %>% - select(id, Study, labels, intervals) + dplyr::mutate(labels = dplyr::case_when(need_packing == TRUE ~ `Main Measure`, TRUE ~ " ")) %>% + dplyr::mutate(intervals = end_num - start_num + 1) %>% + dplyr::select(id, Study, labels, intervals) # Join Table s1_measures_tbl <- s1_measures_tbl %>% - left_join(pack_starts) %>% - rename( +dplyr::left_join(pack_starts) %>% +dplyr::rename( "Measure" = `Subscale (if any)`, "Construct" = `Measured Construct` ) # Clean NAs from the labels s1_measures_tbl <- s1_measures_tbl %>% - mutate(labels = case_when(is.na(labels) ~ "", TRUE ~ labels)) + dplyr::mutate(labels = dplyr::case_when(is.na(labels) ~ "", TRUE ~ labels)) pack_starts_nested <- s1_measures_tbl %>% - nest_by(Study) %>% - ungroup() + dplyr::nest_by(Study) %>% + dplyr::ungroup() # List of df to feed to the function in-text to render table on the spot s1_measures_dfs <- pack_starts_nested %>% - select(Study, data) %>% + dplyr::select(Study, data) %>% deframe_as_list() ``` @@ -406,7 +412,7 @@ included the valence item. 
```{r s1a-correlations, include=FALSE} s1a_target_vars <- s1a_df %>% - select( + dplyr::select( heart, valence, CESD_sum, social_isolation_mean, bio_diff_mean, reactivity_mean, reactivity_perspective, reactivity_fantasy, reactivity_empathy, reactivity_distress, @@ -414,7 +420,7 @@ s1a_target_vars <- s1a_df %>% integrative_complexity_mean, multiple_identity_mean ) %>% - rename( +dplyr::rename( "Heart" = heart, "Valence" = valence, "CESD" = CESD_sum, @@ -432,7 +438,7 @@ s1a_target_vars <- s1a_df %>% "Multiple Identity" = multiple_identity_mean ) # Export Target Variable DF for the plots in Appendix -write_rds(s1a_target_vars, here("data_public/subset", "Study1a_public_target.rds")) +readr::write_rds(s1a_target_vars, here("data_public/subset", "Study1a_public_target.rds")) # Assign variable types s1a_var_types <- c( valence = "Convergent", @@ -457,26 +463,26 @@ s1a_cor_CIs <- s1a_target_vars %>% get_correlations(vars = 2:last_col(), outcome = Heart) ## Add Types s1a_cor_CIs <- s1a_cor_CIs %>% - mutate(type = s1a_var_types_long[label]) + dplyr::mutate(type = s1a_var_types_long[label]) ## Add outcome for later merging (Only Heart T1) s1a_cor_CIs <- s1a_cor_CIs %>% - mutate(outcome = "Heart T1") + dplyr::mutate(outcome = "Heart T1") ## Add Equivalence Test Results s1a_cor_CIs <- s1a_cor_CIs %>% - mutate(eq_decision = map2_chr(conf.low_model90, conf.high_model90, ~ evaluate_equivalence(.x, .y, ROPE_r))) %>% - mutate(eq_decision = case_when( + dplyr::mutate(eq_decision = map2_chr(conf.low_model90, conf.high_model90, ~ evaluate_equivalence(.x, .y, ROPE_r))) %>% + dplyr::mutate(eq_decision = dplyr::case_when( p.value_model95 >= .05 ~ eq_decision, TRUE ~ "" )) %>% # Add labels for plot annotation - mutate(eq_label = case_when( + dplyr::mutate(eq_label = dplyr::case_when( eq_decision == "Rejected" ~ "", eq_decision == "Accepted" ~ "∗", eq_decision == "Undecided" ~ "+" )) # Markdown-formatted APA Text (95% only) -s1a_txt <- s1a_cor_CIs %>% select(label, APA_model95) %>% - # 
`deframe()` to convert two-column df to a named vector +s1a_txt <- s1a_cor_CIs %>% dplyr::select(label, APA_model95) %>% + # `tibble::deframe()` to convert two-column df to a named vector deframe_as_list() ``` @@ -576,36 +582,36 @@ received partial course credits as compensation. ```{r s1b-participants} # Get the participants who participanted more than twice in one visit s1b_duplicates <- s1b_df %>% - group_by(id, visit) %>% - summarise(n = n()) %>% - filter(n > 1) + dplyr::group_by(id, visit) %>% + dplyr::summarise(n = dplyr::n()) %>% + dplyr::filter(n > 1) s1b_duplicates <- s1b_df %>% - group_by(id, visit) %>% - summarise(n = n()) %>% - filter(id %in% s1b_duplicates$id) -# s1b_df %>% filter(id == 306) -# s1b_df %>% filter(id == 402) + dplyr::group_by(id, visit) %>% + dplyr::summarise(n = dplyr::n()) %>% + dplyr::filter(id %in% s1b_duplicates$id) +# s1b_df %>% dplyr::filter(id == 306) +# s1b_df %>% dplyr::filter(id == 402) # Print out the different dates, if needed -# s1b_df %>% filter(id %in% s1b_duplicates$id) %>% select(id, visit, StartDate) %>% arrange(id, visit) +# s1b_df %>% dplyr::filter(id %in% s1b_duplicates$id) %>% dplyr::select(id, visit, StartDate) %>% arrange(id, visit) s1b_df <- s1b_df %>% # p 306, only include the first data point - filter(!(id == 306 & StartDate == lubridate::as_datetime("2016-11-18 11:34:00"))) %>% - filter(!(id == 402 & StartDate == lubridate::as_datetime("2016-09-22 12:31:00"))) -# filter(id == 306 | id == 402) + dplyr::filter(!(id == 306 & StartDate == lubridate::as_datetime("2016-11-18 11:34:00"))) %>% + dplyr::filter(!(id == 402 & StartDate == lubridate::as_datetime("2016-09-22 12:31:00"))) +# dplyr::filter(id == 306 | id == 402) s1b_dates <- s1b_df %>% pivot_wider(id, names_prefix = "visit", names_from = visit, values_from = StartDate) %>% - mutate( + dplyr::mutate( gap12 = visit2 - visit1, gap13 = visit3 - visit1, gap23 = visit3 - visit2 ) # Created a summary df showing average participation gaps between visits 
s1b_dates_summary <- s1b_dates %>% - summarise(across(c("gap12", "gap13", "gap23"), +dplyr::summarise(across(c("gap12", "gap13", "gap23"), .fns = list("mean" = ~ mean(., na.rm = T) %>% round(2)) )) %>% rowwise() %>% - mutate(gap_mean = mean(c(gap12_mean, gap13_mean, gap23_mean) %>% round(2))) + dplyr::mutate(gap_mean = mean(c(gap12_mean, gap13_mean, gap23_mean) %>% round(2))) ``` The dataset contained data from `r s1b_df %>% distinct(id) %>% nrow(.)` @@ -704,7 +710,7 @@ s1b_var_types <- c( # Select the variables that will be included in the correlation analysis s1b_target_variables <- s1b_df %>% - select( + dplyr::select( id, visit, heart, valence, @@ -732,9 +738,9 @@ s1b_target_variables <- s1b_df %>% CESD_sum ) %>% # Rename the variables for presentatioon - rename(all_of(s1b_labels)) +dplyr::rename(all_of(s1b_labels)) # Save the target variables DF for Appendix -write_rds(s1b_target_variables, here("data_public", "subset", "Study1b_public_target.rds")) +readr::write_rds(s1b_target_variables, here("data_public", "subset", "Study1b_public_target.rds")) # Labels keys df for later use s1b_target_vars_labels_df <- tibble( @@ -743,7 +749,7 @@ s1b_target_vars_labels_df <- tibble( ) # Append to the vartypes s1b_var_types <- s1b_var_types %>% - left_join(s1b_target_vars_labels_df) +left_join(s1b_target_vars_labels_df) # Create a longer version of the variable types df with labels # for correlation plot s1b_var_types_visits <- bind_rows( @@ -751,7 +757,7 @@ s1b_var_types_visits <- bind_rows( "T2" = s1b_var_types, "T3" = s1b_var_types, .id = "visit" ) %>% - mutate(label = paste0(label, " ", visit)) + dplyr::mutate(label = paste0(label, " ", visit)) # Bivariate Correlations ---------------------------------------------------------------- # Long to Wide @@ -766,39 +772,39 @@ s1b_cor_heart1 <- s1b_target_wide %>% vars = `Valence T1`:last_col(), outcome = `Heart T1` ) %>% - mutate(outcome = "Heart T1") + dplyr::mutate(outcome = "Heart T1") s1b_cor_heart2 <- s1b_target_wide %>% 
get_correlations( vars = `Valence T1`:last_col(), outcome = `Heart T2` ) %>% - mutate(outcome = "Heart T2") + dplyr::mutate(outcome = "Heart T2") s1b_cor_heart3 <- s1b_target_wide %>% get_correlations( vars = `Valence T1`:last_col(), outcome = `Heart T3` ) %>% - mutate(outcome = "Heart T3") + dplyr::mutate(outcome = "Heart T3") # Correlation s1b_cor_heart123 <- s1b_cor_heart1 %>% bind_rows(s1b_cor_heart2) %>% bind_rows(s1b_cor_heart3) %>% - select(outcome, everything()) + dplyr::select(outcome, everything()) # Add variable types and labels ---------------------------------------------------------------- # Types (Convergent, Discriminant) s1b_cor_CIs <- s1b_cor_heart123 %>% - left_join(s1b_var_types_visits) +left_join(s1b_var_types_visits) # Add Times Variable s1b_cor_CIs <- s1b_cor_CIs %>% # Extract Time - mutate(time = str_extract(label, "T\\d")) + dplyr::mutate(time = str_extract(label, "T\\d")) # Cache the table -s1b_cor_CIs %>% write_rds(here("data_public", "aggregate", "s1b_cor_CIs.rds")) +s1b_cor_CIs %>% readr::write_rds(here("data_public", "aggregate", "s1b_cor_CIs.rds")) # APA Text s1b_cor_txt <- s1b_cor_heart123 %>% - select(label, APA_model95) %>% - deframe() %>% + dplyr::select(label, APA_model95) %>% + tibble::deframe() %>% as.list() # MixedModel -------------------------------------------------------------------- @@ -813,17 +819,17 @@ s1b_dfC <- s1b_df %>% ~ . 
- mean(., na.rm = TRUE) ) %>% # Visit As Factor - mutate(visit = as.factor(visit)) + dplyr::mutate(visit = as.factor(visit)) # Mixed model table - (Not run) # s1b_lm_df <- tibble(predictor = s1b_labels[3:length(s1b_labels)]) %>% -# mutate(formula = paste0("heart ~ 1 + ", predictor, " + visit + (1|id)")) %>% -# mutate(model = map(formula, ~lmer(formula = ., +# dplyr::mutate(formula = paste0("heart ~ 1 + ", predictor, " + visit + (1|id)")) %>% +# dplyr::mutate(model = map(formula, ~lmer(formula = ., # data = s1b_dfC)), # broom = map(model, ~broom.mixed::tidy(.)), # target_pred = map(broom, ~.[2,])) %>% # # Calculate confidence intervals - parm = 4 for the predictor -# mutate(ci95 = map(model, ~confint.merMod(parm = 4, ., method = "Wald", level = .95) %>% +# dplyr::mutate(ci95 = map(model, ~confint.merMod(parm = 4, ., method = "Wald", level = .95) %>% # as_tibble() %>% rename_with(~c("lower", "upper"))), # ci90 = map(model, ~confint.merMod(parm = 4, ., method = "Wald", level = .90) %>% as_tibble() %>% # as_tibble() %>% rename_with(~c("lower", "upper")))) %>% @@ -839,11 +845,11 @@ s1b_df_std <- s1b_dfC %>% ~ (scale(.) 
%>% as.vector()) ) %>% # Visit As Factor - mutate(visit = as.factor(visit)) + dplyr::mutate(visit = as.factor(visit)) # Get the table of the standardized model s1b_lm_df_std <- tibble(predictor = s1b_labels[3:length(s1b_labels)]) %>% - mutate(formula = paste0("heart ~ 1 + ", predictor, " + visit + (1|id)")) %>% - mutate( + dplyr::mutate(formula = paste0("heart ~ 1 + ", predictor, " + visit + (1|id)")) %>% + dplyr::mutate( model = map(formula, ~ lmer( formula = ., data = s1b_df_std @@ -852,7 +858,7 @@ s1b_lm_df_std <- tibble(predictor = s1b_labels[3:length(s1b_labels)]) %>% target_pred = map(broom, ~ .[2, ]) ) %>% # Calculate confidence intervals - parm = 4 for the predictor, Wald to save time - mutate( + dplyr::mutate( ci95 = map(model, ~ confint.merMod(parm = 4, ., method = "Wald", level = .95) %>% as_tibble() %>% rename_with(~ c("lower", "upper"))), @@ -868,22 +874,22 @@ s1b_lm_df_std <- s1b_lm_df_std %>% add_eqtest() # Add Types s1b_mixed_mods_std <- s1b_lm_df_std %>% - left_join(s1b_var_types) + dplyr::left_join(s1b_var_types) # Add predictor names -s1b_mixed_mods_std <- s1b_mixed_mods_std %>% mutate(pred_label = names(predictor)) +s1b_mixed_mods_std <- s1b_mixed_mods_std %>% dplyr::mutate(pred_label = names(predictor)) # Add Convergent vs Discriminant Labels s1b_mixed_mods_std <- s1b_mixed_mods_std %>% - left_join(s1b_var_types) + dplyr::left_join(s1b_var_types) # Add APA s1b_lm_df_std <- s1b_lm_df_std %>% - mutate( + dplyr::mutate( described = map(model, ~ describe.glm(., dtype = 3)), APA_pred = map_chr(described, ~ .[2, "str"]) ) # Get a list of APA-formatted texts s1b_lm_txt <- s1b_lm_df_std %>% - select(predictor, APA_pred) %>% - deframe() %>% + dplyr::select(predictor, APA_pred) %>% + tibble::deframe() %>% as.list() ``` @@ -1079,47 +1085,47 @@ s1c_var_types <- c( tibble(predictor = names(.), type = .) 
# Subset the data to target variables only s1c_target_vars <- s1c_df %>% - select(all_of(s1c_target_vars_labels)) + dplyr::select(all_of(s1c_target_vars_labels)) # Save -s1c_target_vars %>% write_rds(here("data_public", "subset", "Study1c_public_target.rds")) +s1c_target_vars %>% readr::write_rds(here("data_public", "subset", "Study1c_public_target.rds")) # Labels keys df for later use s1c_target_vars_labels_df <- tibble( predictor = s1c_target_vars_labels, label = names(s1c_target_vars_labels) ) # Append labels to the validity types -s1c_var_types <- s1c_var_types %>% left_join(s1c_target_vars_labels_df) +s1c_var_types <- s1c_var_types %>%left_join(s1c_target_vars_labels_df) # Bivariate Correlation Analysis ------------------------------------------------ # Correlations with Heart 1-3 s1c_cor_heart1 <- s1c_target_vars %>% get_correlations(vars = `Valence T1`:last_col(), outcome = `Heart T1`) %>% - mutate(outcome = "Heart T1") + dplyr::mutate(outcome = "Heart T1") s1c_cor_heart2 <- s1c_target_vars %>% get_correlations(vars = `Valence T1`:last_col(), outcome = `Heart T2`) %>% - mutate(outcome = "Heart T2") + dplyr::mutate(outcome = "Heart T2") s1c_cor_heart3 <- s1c_target_vars %>% get_correlations(vars = `Valence T1`:last_col(), outcome = `Heart T3`) %>% - mutate(outcome = "Heart T3") + dplyr::mutate(outcome = "Heart T3") # Correlation s1c_cor_heart123 <- s1c_cor_heart1 %>% bind_rows(s1c_cor_heart2) %>% bind_rows(s1c_cor_heart3) %>% - select(outcome, everything()) + dplyr::select(outcome, everything()) # Add types s1c_cor_CIs <- s1c_cor_heart123 %>% - left_join(s1c_var_types) +left_join(s1c_var_types) # Add Times Variable s1c_cor_CIs <- s1c_cor_CIs %>% - mutate(time = str_extract(label, "T\\d")) -s1c_cor_CIs %>% write_rds(here("data_public", "aggregate", "s1c_cor_CIs.rds")) + dplyr::mutate(time = str_extract(label, "T\\d")) +s1c_cor_CIs %>% readr::write_rds(here("data_public", "aggregate", "s1c_cor_CIs.rds")) # APA text container s1c_txt <- s1c_cor_CIs %>% - 
mutate(outcome_shorthand = str_extract(outcome, "..$")) + dplyr::mutate(outcome_shorthand = str_extract(outcome, "..$")) # List s1c_bivariate_APA <- s1c_txt %>% - mutate(label = paste(outcome, label)) %>% - select(label, APA_model95) %>% + dplyr::mutate(label = paste(outcome, label)) %>% + dplyr::select(label, APA_model95) %>% deframe_as_list() @@ -1133,20 +1139,20 @@ s1c_T2_vars <- names(s1c_target_vars_labels) %>% # Get the table of models and summary (without loop) s1c_T2_mods <- tibble(predictor = s1c_T2_vars[2:length(s1c_T2_vars)]) %>% # Formula - use scale() to standardize on the spot - mutate(formula = paste0( + dplyr::mutate(formula = paste0( "scale(heart2) ~ scale(", predictor, ") + ", "acceptanceEC + closenessEC + acceptanceEC * closenessEC" )) %>% - mutate(model = map(formula, ~ lm( + dplyr::mutate(model = map(formula, ~ lm( formula = ., # Use the standardized data for standardized coefs data = s1c_df ))) # Brooming to get the summary stats -s1c_T2_mods <- s1c_T2_mods %>% mutate(broom = map(model, ~ broom::tidy(.))) +s1c_T2_mods <- s1c_T2_mods %>% dplyr::mutate(broom = map(model, ~ broom::tidy(.))) # Extract CI s1c_T2_mods <- s1c_T2_mods %>% - mutate( + dplyr::mutate( ci95 = map(model, ~ confint(., parm = 2, level = 0.95) %>% as_tibble() %>% rename_with(~ c("lower", "upper"))), @@ -1154,7 +1160,7 @@ s1c_T2_mods <- s1c_T2_mods %>% as_tibble() %>% rename_with(~ c("lower", "upper"))) ) %>% - mutate(broom_scale = map(broom, ~ .[2, ])) %>% + dplyr::mutate(broom_scale = map(broom, ~ .[2, ])) %>% unnest(broom_scale) %>% unnest(c(ci95, ci90), names_sep = "_") # Equivalence test @@ -1162,20 +1168,20 @@ s1c_T2_mods <- s1c_T2_mods %>% add_eqtest() # Add labels for variables s1c_T2_mods <- s1c_T2_mods %>% - mutate(names = names(predictor)) + dplyr::mutate(names = names(predictor)) # Add Types s1c_T2_mods <- s1c_T2_mods %>% - left_join(s1c_var_types, by = "predictor") +left_join(s1c_var_types, by = "predictor") # Add Markdown s1c_T2_mods <- s1c_T2_mods %>% - mutate( 
+ dplyr::mutate( described = map(model, ~ describe.glm(., dtype = 3)), APA_pred = map_chr(described, ~ .[2, "str"]) ) # Deframe to get a list of Markdown text s1c_T2_txt <- s1c_T2_mods %>% - select(predictor, APA_pred) %>% - deframe() %>% + dplyr::select(predictor, APA_pred) %>% + tibble::deframe() %>% as.list() ``` @@ -1216,7 +1222,7 @@ dominance (`r s1c_T2_txt$dominance`). See @fig-s1c-T2reg-plot for the forest plo # Plot - regression at T2 s1c_T2_mods %>% # Reorder Names for Scales (in-order then reverse) - mutate(names = fct_inorder(names) %>% fct_rev()) %>% + dplyr::mutate(names = fct_inorder(names) %>% fct_rev()) %>% ggplot(aes(y = names, x = estimate, color = type)) + # Lines for Zero and SESOI geom_zeroSESOI + @@ -1248,15 +1254,15 @@ s1c_T2_mods %>% ```{r s1c-mixed-model} # Target vars for the mixed model - only valence is measured three times - no looping s1c_df_long <- s1c_df %>% - select(id, acceptance, closeness, grouping_dummy, heart1:heart3, valence1:valence3) %>% + dplyr::select(id, acceptance, closeness, grouping_dummy, heart1:heart3, valence1:valence3) %>% pivot_longer(c(heart1:heart3, valence1:valence3)) %>% - mutate( + dplyr::mutate( time = str_extract(name, "\\d"), - name = str_replace(name, "\\d", "") + name = stringr::str_replace(name, "\\d", "") ) %>% pivot_wider(names_from = name, values_from = value) # Cache the long file -s1c_df_long %>% write_rds(here("data_public", "s1c_df_long.rds")) +s1c_df_long %>% readr::write_rds(here("data_public", "s1c_df_long.rds")) # Construct a mixed model s1c_mixed_mod <- lmer( formula = scale(heart) ~ scale(valence) + grouping_dummy * time + (1 | id), @@ -1269,8 +1275,8 @@ s1c_mixed_mods <- tibble( model = list(s1c_mixed_mod), broom = list(broom.mixed::tidy(s1c_mixed_mod)) ) %>% - mutate(broom_pred = map(broom, ~ .[2, ])) %>% - mutate( + dplyr::mutate(broom_pred = map(broom, ~ .[2, ])) %>% + dplyr::mutate( ci95 = map(model, ~ confint(., parm = 4, level = 0.95) %>% as_tibble() %>% rename_with(~ c("lower", 
"upper"))), @@ -1279,7 +1285,7 @@ s1c_mixed_mods <- tibble( rename_with(~ c("lower", "upper"))) ) %>% # Add labels for the variable - mutate(names = names(predictor)) %>% + dplyr::mutate(names = names(predictor)) %>% # Unnest unnest(broom_pred) %>% unnest(c(ci95, ci90), names_sep = "_") @@ -1287,17 +1293,17 @@ s1c_mixed_mods <- tibble( s1c_mixed_mods <- s1c_mixed_mods %>% add_eqtest() # Add Convergent Discriminant Types s1c_mixed_mods <- s1c_mixed_mods %>% - mutate(type = c("Convergent")) + dplyr::mutate(type = c("Convergent")) # Add Markdown s1c_mixed_mods <- s1c_mixed_mods %>% - mutate( + dplyr::mutate( described = map(model, ~ describe.glm(., dtype = 3)), APA_pred = map_chr(described, ~ .[2, "str"]) ) # Deframe to get a list of Markdown text s1c_mixed_txt <- s1c_mixed_mods %>% - select(predictor, APA_pred) %>% - deframe() %>% + dplyr::select(predictor, APA_pred) %>% + tibble::deframe() %>% as.list() ``` @@ -1354,33 +1360,33 @@ s1c_T3heart_plot ```{r s1c-sensitivity-mixed} ### Mixed model s1c_sensitivity_mixed_df <- tibble(outcome = c("heart", "valence")) %>% - mutate(formula = paste0(outcome, "~ acceptance * time + (1|id)")) %>% - mutate(model = map(formula, ~ lmer(formula = ., data = s1c_df_long))) %>% - mutate( + dplyr::mutate(formula = paste0(outcome, "~ acceptance * time + (1|id)")) %>% + dplyr::mutate(model = map(formula, ~ lmer(formula = ., data = s1c_df_long))) %>% + dplyr::mutate( tidy = map(model, ~ broom.mixed::tidy(.)), APA = map(model, ~ describe.glm(.)) ) %>% - mutate( + dplyr::mutate( emm_time_acceptance = map(model, ~ emmeans(., pairwise ~ time, by = "acceptance")), emm_time_acceptance_tidy = map(emm_time_acceptance, function(x) { x$contrasts %>% tidy() %>% - mutate(APA = describe_t_val(t_stat = statistic, df = df, p_value = adj.p.value)) %>% - mutate(acceptance = case_when( + dplyr::mutate(APA = describe_t_val(t_stat = statistic, df = df, p_value = adj.p.value)) %>% + dplyr::mutate(acceptance = dplyr::case_when( acceptance == "0" ~ "Rejection", 
acceptance == "1" ~ "Acceptance" )) %>% unite("label", c(acceptance, contrast)) }) ) %>% - mutate(emm_APA = map(emm_time_acceptance_tidy, function(x) { + dplyr::mutate(emm_APA = map(emm_time_acceptance_tidy, function(x) { x %>% - select(label, APA) %>% + dplyr::select(label, APA) %>% deframe_as_list() })) # Deframe as list to get a list s1c_sensitivity_heart_mod_APA <- s1c_sensitivity_mixed_df %>% - select(outcome, emm_APA) %>% + dplyr::select(outcome, emm_APA) %>% deframe_as_list() # Heart Manikin scores over time @@ -1507,15 +1513,15 @@ s1d_var_types <- c( # Rename the conditions s1d_df <- s1d_df %>% - mutate(confederate_desire = case_when( + dplyr::mutate(confederate_desire = dplyr::case_when( expectancy == "Acceptance" ~ "High Confederate Desire", expectancy == "Rejection" ~ "Low Confederate Desire" )) # Subset the data to target variables only s1d_df_target_only <- s1d_df %>% - select(all_of(s1d_target_vars_labels)) + dplyr::select(all_of(s1d_target_vars_labels)) # Chache the file for Appendix -s1d_df_target_only %>% write_rds(here("data_public", "subset", "Study1d_public_target.rds")) +s1d_df_target_only %>% readr::write_rds(here("data_public", "subset", "Study1d_public_target.rds")) # Labels keys df for later use s1d_target_vars_labels_df <- tibble( predictor = s1d_target_vars_labels, @@ -1523,10 +1529,10 @@ s1d_target_vars_labels_df <- tibble( ) # Append to the vartypes s1d_var_types <- s1d_var_types %>% - left_join(s1d_target_vars_labels_df) +left_join(s1d_target_vars_labels_df) # Reorder the variables (forcats::) s1d_var_types <- s1d_var_types %>% - mutate(label = fct_inorder(label)) + dplyr::mutate(label = fct_inorder(label)) # Descriptives via my function s1d_descriptives <- s1d_df %>% @@ -1536,15 +1542,15 @@ s1d_descriptives <- s1d_df %>% # APA Style s1d_desc_APA <- s1d_descriptives %>% get_APA_from_msd(group_cols = c("expectancy", "rejection")) s1d_msd <- s1d_desc_APA %>% - mutate(label = paste0( + dplyr::mutate(label = paste0( str_extract(expectancy, 
"^."), str_extract(rejection, "^."), str_remove(variable, " ") )) # List of Means and SDs (order: expectancy(A/R), rejection (A/R), and variable(A/R)) s1d_desc_txt <- s1d_msd %>% - select(label, APA) %>% - deframe() %>% + dplyr::select(label, APA) %>% + tibble::deframe() %>% as.list() ``` @@ -1584,25 +1590,25 @@ Time 4 questionnaires in Table @tbl-s1d-table. See s1d_heart_vars <- c("Heart T1", "Heart T2", "Heart T3", "Heart T4") # Tibble to gather correlation models each Time do get_correlations s1d_cor_CIs <- tibble(outcome = s1d_heart_vars) %>% - mutate(cors = map(outcome, ~ get_correlations(s1d_df_target_only, + dplyr::mutate(cors = map(outcome, ~ get_correlations(s1d_df_target_only, vars = `Valence T1`:last_col(), outcome = . ))) %>% unnest() # Add types -s1d_cor_CIs <- s1d_cor_CIs %>% left_join(s1d_var_types) +s1d_cor_CIs <- s1d_cor_CIs %>%left_join(s1d_var_types) # Add Times Variable -s1d_cor_CIs <- s1d_cor_CIs %>% mutate(time = str_extract(label, "T\\d")) +s1d_cor_CIs <- s1d_cor_CIs %>% dplyr::mutate(time = str_extract(label, "T\\d")) # Cache the aggregate -s1d_cor_CIs %>% write_rds(here("data_public", "aggregate", "s1d_cor_CIs.rds")) +s1d_cor_CIs %>% readr::write_rds(here("data_public", "aggregate", "s1d_cor_CIs.rds")) # APA text container s1d_txt <- s1d_cor_CIs %>% - mutate(outcome_shorthand = str_extract(outcome, "..$")) %>% - mutate(var_outcome = paste(label, outcome, sep = "_") %>% str_replace_all(" ", "")) + dplyr::mutate(outcome_shorthand = str_extract(outcome, "..$")) %>% + dplyr::mutate(var_outcome = paste(label, outcome, sep = "_") %>% str_replace_all(" ", "")) # Deframe to a list s1d_txt <- s1d_txt %>% - select(var_outcome, APA_model95) %>% - deframe() %>% + dplyr::select(var_outcome, APA_model95) %>% + tibble::deframe() %>% as.list() ``` @@ -1632,44 +1638,44 @@ s1d_dfC <- s1d_df %>% ) # Effect Coding s1d_dfC <- s1d_dfC %>% - mutate( - confederate_desire_ec = case_when( + dplyr::mutate( + confederate_desire_ec = dplyr::case_when( 
confederate_desire == "High Confederate Desire" ~ 0.5, confederate_desire == "Low Confederate Desire" ~ -0.5 ), - rejection_ec = case_when( + rejection_ec = dplyr::case_when( rejection == "Rejection" ~ .5, rejection == "Acceptance" ~ -.5 ) ) # Prepare a long dataset s1d_dfC_long <- s1d_dfC %>% - select( + dplyr::select( id, confederate_desire, rejection, confederate_desire_ec, rejection_ec, heart_T1:heart_T4, valence_T1:valence_T4, arousal_T1:arousal_T4, dominance_T1:dominance_T4 ) %>% pivot_longer(matches("T[1-4]")) %>% - mutate( + dplyr::mutate( time = str_extract(name, "\\d"), - name = str_replace(name, "_.\\d", "") + name = stringr::str_replace(name, "_.\\d", "") ) %>% pivot_wider(names_from = name, values_from = value) # Cache -s1d_dfC_long %>% write_rds(here("data_public", "s1d_dfC_long.rds")) +s1d_dfC_long %>% readr::write_rds(here("data_public", "s1d_dfC_long.rds")) # Lmer model s1d_mixed_preds <- c("heart", "valence", "arousal", "dominance") # Create a table of formula and their associated results s1d_mixed_df <- tibble(predictor = s1d_mixed_preds[2:length(s1d_mixed_preds)]) %>% - mutate(formula = paste0("scale(heart) ~ 1 + scale(", predictor, ") + time + (1|id)")) %>% - mutate( + dplyr::mutate(formula = paste0("scale(heart) ~ 1 + scale(", predictor, ") + time + (1|id)")) %>% + dplyr::mutate( model = map(formula, ~ lmer(formula = ., data = s1d_dfC_long)), broom = map(model, ~ broom.mixed::tidy(.)), target_pred = map(broom, ~ .[2, ]) ) %>% # Calculate confidence intervals - parm = 4 for the predictor - mutate( + dplyr::mutate( ci95 = map(model, ~ confint.merMod(parm = 4, ., method = "Wald", level = .95) %>% as_tibble() %>% rename_with(~ c("lower", "upper"))), @@ -1682,20 +1688,20 @@ s1d_mixed_df <- tibble(predictor = s1d_mixed_preds[2:length(s1d_mixed_preds)]) % # Predictor names s1d_mixed_df <- s1d_mixed_df %>% - mutate(pred_label = c("Valence", "Arousal", "Dominance")) %>% + dplyr::mutate(pred_label = c("Valence", "Arousal", "Dominance")) %>% # Add Types 
- mutate(type = c("Convergent", "Discriminant", "Discriminant")) + dplyr::mutate(type = c("Convergent", "Discriminant", "Discriminant")) # APA s1d_mixed_df <- s1d_mixed_df %>% - mutate( + dplyr::mutate( described = map(model, ~ describe.glm(., dtype = 3)), APA_pred = map_chr(described, ~ .[2, "str"]) ) # Deframe to get a list of Markdown text s1d_mixed_txt <- s1d_mixed_df %>% - select(predictor, APA_pred) %>% - deframe() %>% + dplyr::select(predictor, APA_pred) %>% + tibble::deframe() %>% as.list() ``` @@ -1752,9 +1758,9 @@ ggplot( s1d_sensitivity_t <- t.test(formula = heart_T3 ~ rejection, data = s1d_df) # get means and sd only for rejection vs acceptance s1d_sensitivity_APA <- describe_by_factor(s1d_df, rejection, vars = heart_T3) %>% - mutate(msd = to_msd(heart_T3_mean, heart_T3_sd)) %>% - select(rejection, msd) %>% - deframe() %>% + dplyr::mutate(msd = to_msd(heart_T3_mean, heart_T3_sd)) %>% + dplyr::select(rejection, msd) %>% + tibble::deframe() %>% as.list() s1d_sensitivity_d <- effectsize::cohens_d(x = heart_T3 ~ rejection, data = s1d_df) # Get the RMD object @@ -1798,29 +1804,29 @@ s1d_T3heart_plot ```{r s1d-sensitivity-mixed} ### Mixed model s1d_sensitivity_mixed_df <- tibble(outcome = c("heart", "valence")) %>% - mutate(formula = paste0(outcome, "~ rejection * time + (1|id)")) %>% - mutate(model = map(formula, ~ lmer(formula = ., data = s1d_dfC_long))) %>% - mutate( + dplyr::mutate(formula = paste0(outcome, "~ rejection * time + (1|id)")) %>% + dplyr::mutate(model = map(formula, ~ lmer(formula = ., data = s1d_dfC_long))) %>% + dplyr::mutate( tidy = map(model, ~ broom.mixed::tidy(.)), APA = map(model, ~ describe.glm(.)) ) %>% - mutate( + dplyr::mutate( emm_time_acceptance = map(model, ~ emmeans(., pairwise ~ time, by = "rejection")), emm_time_acceptance_tidy = map(emm_time_acceptance, function(x) { x$contrasts %>% tidy() %>% - mutate(APA = describe_t_val(t_stat = statistic, df = df, p_value = adj.p.value)) %>% + dplyr::mutate(APA = describe_t_val(t_stat = 
statistic, df = df, p_value = adj.p.value)) %>% unite("label", c(rejection, contrast)) }) ) %>% - mutate(emm_APA = map(emm_time_acceptance_tidy, function(x) { + dplyr::mutate(emm_APA = map(emm_time_acceptance_tidy, function(x) { x %>% - select(label, APA) %>% + dplyr::select(label, APA) %>% deframe_as_list() })) # Deframe as list to get a list s1d_sensitivity_heart_mod_APA <- s1d_sensitivity_mixed_df %>% - select(outcome, emm_APA) %>% + dplyr::select(outcome, emm_APA) %>% deframe_as_list() @@ -1870,13 +1876,13 @@ s1_measures_dfs$e %>% s1_render_kable(studykey = "e") ```{r s1e-date-calc} s1e_df <- s1e_df %>% - mutate(D1D2_diff = lubridate::interval( + dplyr::mutate(D1D2_diff = lubridate::interval( StartDate_D1, StartDate_D2 )) s1e_D1D2diff <- s1e_df %>% - summarise(mean(D1D2_diff)) %>% - pull() %>% +dplyr::summarise(mean(D1D2_diff)) %>% +dplyr::pull() %>% lubridate::as.duration() %>% as.numeric("days") %>% round(2) @@ -2059,16 +2065,16 @@ s1e_var_types <- c( "nts_T5_mean" = "Convergent" ) %>% tibble(predictor = names(.), type = .) 
%>% - mutate( + dplyr::mutate( predictor = fct_inorder(predictor), type = factor(type, levels = c("Convergent", "Convergent (R)", "Discriminant")) ) # Subset the data to target variables only s1e_df_target_only <- s1e_df %>% - select(all_of(s1e_target_vars_labels)) + dplyr::select(all_of(s1e_target_vars_labels)) # Chache the file for Appendix -s1e_df_target_only %>% write_rds(here("data_public", "subset", "Study1e_public_target.rds")) +s1e_df_target_only %>% readr::write_rds(here("data_public", "subset", "Study1e_public_target.rds")) # Labels keys df for later use s1e_target_vars_labels_df <- tibble( @@ -2077,7 +2083,7 @@ s1e_target_vars_labels_df <- tibble( ) # Append to the vartypes s1e_var_types <- s1e_var_types %>% - left_join(s1e_target_vars_labels_df) +left_join(s1e_target_vars_labels_df) # Correlation analysis --------------------------------------------------------- # Create CIs for Correlations @@ -2088,42 +2094,42 @@ s1e_heart_vars <- c( ) # Tibble to gather correlation models each Time do get_correlations s1e_cor_CIs <- tibble(outcome = s1e_heart_vars) %>% - mutate(cors = map(outcome, ~ get_correlations(s1e_df_target_only, + dplyr::mutate(cors = map(outcome, ~ get_correlations(s1e_df_target_only, vars = `Valence T1`:`NTS Meaning T5`, outcome = . 
))) %>% unnest(cors) # Add validity types s1e_cor_CIs <- s1e_cor_CIs %>% - left_join(s1e_var_types) +left_join(s1e_var_types) # Add Times Variable s1e_cor_CIs <- s1e_cor_CIs %>% - mutate(time = str_extract(label, "T\\d")) + dplyr::mutate(time = str_extract(label, "T\\d")) ## Add Equivalence Test Results s1e_cor_CIs <- s1e_cor_CIs %>% - mutate(eq_decision = map2_chr(conf.low_model90, conf.high_model90, ~ evaluate_equivalence(.x, .y, ROPE_r))) %>% + dplyr::mutate(eq_decision = map2_chr(conf.low_model90, conf.high_model90, ~ evaluate_equivalence(.x, .y, ROPE_r))) %>% # Delete equivalence tests for non-significant results - mutate(eq_decision = case_when( + dplyr::mutate(eq_decision = dplyr::case_when( p.value_model95 >= .05 ~ eq_decision, TRUE ~ "" )) %>% # Add labels for plot annotation - mutate(eq_label = case_when( + dplyr::mutate(eq_label = dplyr::case_when( eq_decision == "Rejected" ~ "", eq_decision == "Accepted" ~ "∗", eq_decision == "Undecided" ~ "+" )) # Cache the df -s1e_cor_CIs %>% write_rds(here("data_public", "aggregate", "s1e_cor_CIs.rds")) +s1e_cor_CIs %>% readr::write_rds(here("data_public", "aggregate", "s1e_cor_CIs.rds")) # Markdown-formatted APA Text (95% only) s1e_txt <- s1e_cor_CIs %>% - mutate(label = paste(outcome, label)) %>% - select(label, APA_model95) %>% + dplyr::mutate(label = paste(outcome, label)) %>% + dplyr::select(label, APA_model95) %>% deframe_as_list() ## 90%CI version for equivalence test s1e_txt_90 <- s1e_cor_CIs %>% - mutate(label = paste(outcome, label)) %>% - select(label, APA_model90) %>% + dplyr::mutate(label = paste(outcome, label)) %>% + dplyr::select(label, APA_model90) %>% deframe_as_list() @@ -2149,12 +2155,12 @@ subjective socioeconomic status ```{r s1e-mixed} # Standardize the dataset s1e_df_std <- s1e_df %>% - select( + dplyr::select( id, participant_desire, confederate_desire, desires_dummy, rejection, all_of(as.vector(s1e_target_vars_labels)) ) %>% # Standardize - mutate(across( + dplyr::mutate(across( 
all_of(as.vector(s1e_target_vars_labels)), ~ scale(.) %>% as.vector() )) @@ -2169,12 +2175,12 @@ s1e_df_long <- s1e_df %>% )), matches("nts_.._mean") )) %>% - mutate( + dplyr::mutate( time = str_extract(name, "\\d"), - name = str_replace(name, "_.\\d", "") + name = stringr::str_replace(name, "_.\\d", "") ) %>% pivot_wider(names_from = name, values_from = value) -s1e_df_long %>% write_rds(here("data_public", "s1e_df_long.rds")) +s1e_df_long %>% readr::write_rds(here("data_public", "s1e_df_long.rds")) # Prepare a long dataset s1e_df_std_long <- s1e_df_std %>% pivot_longer(c( @@ -2186,9 +2192,9 @@ s1e_df_std_long <- s1e_df_std %>% )), matches("nts_.._mean") )) %>% - mutate( + dplyr::mutate( time = str_extract(name, "\\d"), - name = str_replace(name, "_.\\d", "") + name = stringr::str_replace(name, "_.\\d", "") ) %>% pivot_wider(names_from = name, values_from = value) @@ -2201,14 +2207,14 @@ s1e_mixed_preds <- c( ) # Create a table of formula and their associated results s1e_mixed_df <- tibble(predictor = s1e_mixed_preds[2:length(s1e_mixed_preds)]) %>% - mutate(formula = paste0("heart ~ 1 + ", predictor, " + time + desires_dummy * rejection * time + (1|id)")) %>% - mutate( + dplyr::mutate(formula = paste0("heart ~ 1 + ", predictor, " + time + desires_dummy * rejection * time + (1|id)")) %>% + dplyr::mutate( model = map(formula, ~ lmer(formula = ., data = s1e_df_std_long)), broom = map(model, ~ broom.mixed::tidy(.)), target_pred = map(broom, ~ .[2, ]) ) %>% # Calculate confidence intervals - parm = 4 for the predictor - mutate( + dplyr::mutate( ci95 = map(model, ~ confint.merMod(parm = 4, ., method = "Wald", level = .95) %>% as_tibble() %>% rename_with(~ c("lower", "upper"))), @@ -2220,31 +2226,31 @@ s1e_mixed_df <- tibble(predictor = s1e_mixed_preds[2:length(s1e_mixed_preds)]) % unnest(c(ci95, ci90), names_sep = "_") # Add labels s1e_mixed_df <- s1e_mixed_df %>% - mutate(pred_label = c( + dplyr::mutate(pred_label = c( "Valence", "Arousal", "Dominance", "NTS Belonging", 
"NTS Self-Esteem", "NTS Control", "NTS Meaning", "NTS Overall" )) # Add Type s1e_var_types_long <- s1e_var_types %>% - mutate(predictor = str_remove(predictor, "_T\\d")) %>% - select(-label) %>% + dplyr::mutate(predictor = str_remove(predictor, "_T\\d")) %>% + dplyr::select(-label) %>% distinct() s1e_mixed_df <- s1e_mixed_df %>% - left_join(s1e_var_types_long, by = "predictor") +left_join(s1e_var_types_long, by = "predictor") # Add Plot Order for predictors s1e_mixed_df <- s1e_mixed_df %>% - mutate(plot_order = row_number()) + dplyr::mutate(plot_order = dplyr::row_number()) # APA s1e_mixed_df <- s1e_mixed_df %>% - mutate( + dplyr::mutate( described = map(model, ~ describe.glm(., dtype = 3)), APA_pred = map_chr(described, ~ .[2, "str"]) ) # Deframe to get a list of Markdown text s1e_mixed_APA <- s1e_mixed_df %>% - select(predictor, APA_pred) %>% + dplyr::select(predictor, APA_pred) %>% deframe_as_list() ``` @@ -2279,7 +2285,7 @@ Manikins. s1e_mixed_df %>% # Order by decreasing order by validity type, and order by the scale arrange(desc(type), desc(plot_order)) %>% - mutate(pred_label = fct_inorder(pred_label)) %>% + dplyr::mutate(pred_label = fct_inorder(pred_label)) %>% ggplot(aes(y = pred_label, x = estimate, color = type)) + # Add lines for zero and SESOI geom_zeroSESOI + @@ -2316,10 +2322,10 @@ s1e_sensitivity_mod <- t.test( ) # get means and sd only for rejection vs acceptance s1e_sensitivity_APA <- describe_by_factor(s1e_df, rejection, vars = heart_T5) %>% - mutate(msd = to_msd(heart_T5_mean, heart_T5_sd)) %>% - select(rejection, msd) %>% - mutate(rejection = to_factor(rejection)) %>% - deframe() %>% + dplyr::mutate(msd = to_msd(heart_T5_mean, heart_T5_sd)) %>% + dplyr::select(rejection, msd) %>% + dplyr::mutate(rejection = to_factor(rejection)) %>% + tibble::deframe() %>% as.list() # Get the cohens d and ci s1e_sensitivity_dci <- effectsize::cohens_d(x = heart_T5 ~ rejection, data = s1e_df) @@ -2345,7 +2351,7 @@ Heart Manikin scores than the control 
participants #| label: fig-s1e-belonging-plot # Study 1e (EVv1) - Outcome: Heart Manikin at Time 5 s1e_T5_heart <- s1e_df %>% - mutate(rejection = to_factor(rejection)) %>% + dplyr::mutate(rejection = to_factor(rejection)) %>% ggplot(aes(x = rejection, y = heart_T5, color = rejection)) + # Render the default violin plot default_violin + @@ -2396,12 +2402,12 @@ s1e_NTS_T5_vars <- c( ) s1e_NTS_T5_df <- tibble(outcome = s1e_NTS_T5_vars) %>% - mutate(formula = (paste0(outcome, " ~ rejection"))) %>% - mutate(fit = map(formula, ~ t.test( + dplyr::mutate(formula = (paste0(outcome, " ~ rejection"))) %>% + dplyr::mutate(fit = map(formula, ~ t.test( formula = as.formula(.), data = s1e_df ))) %>% - mutate( + dplyr::mutate( broom = map(fit, ~ broom::tidy(.)), ci95 = map(formula, ~ effectsize::cohens_d( x = as.formula(.), @@ -2413,24 +2419,24 @@ s1e_NTS_T5_df <- tibble(outcome = s1e_NTS_T5_vars) %>% )) ) %>% # Get APA - mutate(t_APA = map_chr(fit, describe.ttest)) %>% - mutate(across(c(ci95, ci90), list(APA = ~ map_chr(., describe_d_ci)))) %>% - mutate(td_APA = str_c(t_APA, ci95_APA, sep = ", ")) + dplyr::mutate(t_APA = map_chr(fit, describe.ttest)) %>% + dplyr::mutate(across(c(ci95, ci90), list(APA = ~ map_chr(., describe_d_ci)))) %>% + dplyr::mutate(td_APA = str_c(t_APA, ci95_APA, sep = ", ")) # RMD s1e_NTS_APA <- s1e_NTS_T5_df %>% - select(outcome, td_APA) %>% - deframe() %>% + dplyr::select(outcome, td_APA) %>% + tibble::deframe() %>% as.list() # Means and SDs for per conditions s1e_NTS_desc <- s1e_df %>% describe_by_factor(rejection, vars = all_of(s1e_NTS_T5_vars)) %>% - mutate(rejection = to_factor(rejection)) %>% + dplyr::mutate(rejection = to_factor(rejection)) %>% get_APA_from_msd(group_cols = "rejection") %>% - mutate(variable = str_remove(variable, "_mean")) %>% - mutate(label = paste0(str_extract(rejection, "^."), variable)) + dplyr::mutate(variable = str_remove(variable, "_mean")) %>% + dplyr::mutate(label = paste0(str_extract(rejection, "^."), variable)) # Get the 
descriptives into the list format and append it to the APA list for rmd s1e_NTS_APA <- s1e_NTS_APA %>% - append(s1e_NTS_desc %>% select(label, APA) %>% deframe() %>% as.list()) + append(s1e_NTS_desc %>% dplyr::select(label, APA) %>% tibble::deframe() %>% as.list()) ``` @@ -2450,7 +2456,7 @@ was effective. See @fig-appendix-s1e-belonging-plot in [Appendix] for the Heart s1e_NTS_long <- s1e_df %>% pivot_longer(matches("T5.+mean")) %>% # labels - mutate(labels = fct_recode(name, + dplyr::mutate(labels = fct_recode(name, "Belonging" = "nts_T5_belonging_mean", "Self-Esteem" = "nts_T5_esteem_mean", "Control" = "nts_T5_control_mean", @@ -2494,7 +2500,7 @@ existence, or overall fundamental need. ```{r probing-rejection-manipulations} -s1c_df_rej <- s1c_df %>% filter(acceptance == "0") +s1c_df_rej <- s1c_df %>% dplyr::filter(acceptance == "0") s1c_heart_time_dci <- s1c_df_rej %>% effectsize::cohens_d(x = .$heart1, y = .$heart2, paired = T) # APA-formatted t and d values with CI's s1c_heart_time <- list( diff --git a/04-Study-2.qmd b/04-Study-2.qmd index 6cfd2c9..1926fe0 100644 --- a/04-Study-2.qmd +++ b/04-Study-2.qmd @@ -11,7 +11,13 @@ knitr::opts_chunk$set( warning = FALSE ) # Load packages -------------------------------------------------------------- -library(tidyverse) +library(dplyr) +library(tidyr) +library(purrr) +library(stringr) +library(forcats) +library(readr) + library(codebook) # to generate codebook library(here) library(car) # for the type III SS in ANOVA @@ -34,7 +40,7 @@ s2_df <- load_s2_df() s2_df_long <- read_rds(here("data_public", "Study2_public_long.rds")) s2_df_long <- s2_df_long %>% - mutate(essay_condition = factor(essay_condition, + dplyr::mutate(essay_condition = factor(essay_condition, levels = c("Social Surrogacy", "Non-Social Surrogacy") )) @@ -45,7 +51,7 @@ s1c_sensitivity_APA <- get_s1c_sensitivity_APA(s1c_df) # Descriptives Table --------------------------------------------------------------------------- ## Describe by essay cndition and 
get mean sd descriptives_table <- s2_df %>% - filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% + dplyr::filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% describe_by_factor(essay_condition, vars = c( T1_Heart_1, T1_Valence_1, T1_Arousal_1, T1_Dominance_1, @@ -55,7 +61,7 @@ descriptives_table <- s2_df %>% ) ) %>% # Clean Names - rename_all(.funs = ~ str_replace(., "_1_", "_")) + rename_all(.funs = ~ stringr::str_replace(., "_1_", "_")) ## Convert descriptives to long via ad-hoc function descriptives_long <- descriptives_table %>% get_desc_db() # e.g.,: get_stats("Non-Social Surrogacy", "Enjoyment", "sd") @@ -63,12 +69,12 @@ get_stats <- function(..., d = descriptives_long) d[list(...), value] ## Build APA descriptives descriptives_APA <- descriptives_long %>% - select(-name) %>% + dplyr::select(-name) %>% pivot_wider( id_cols = c(variable, essay_condition), values_from = value, names_from = stat ) %>% - mutate(APA = to_msd(mean %>% f.round(), sd %>% f.round())) %>% + dplyr::mutate(APA = to_msd(mean %>% f.round(), sd %>% f.round())) %>% data.table::setDT(key = c("essay_condition", "variable")) get_s2_msd <- function(..., d = descriptives_APA) d[list(...), APA] @@ -86,8 +92,8 @@ s2_attention_APA <- s2_df |> s2_count_participants_by_attention_check() # Total number of participants that followed the VG Instructions followed_VG_inst <- s2_df %>% - group_by(followed_VGinstructions) %>% - summarise(n = n()) %>% + dplyr::group_by(followed_VGinstructions) %>% + dplyr::summarise(n = dplyr::n()) %>% deframe_as_list() ## Append the overall rater agreement followed_VG_inst <- followed_VG_inst %>% @@ -101,25 +107,25 @@ followed_VG_inst <- followed_VG_inst %>% # Analysis N s2_analytic_N <- s2_df %>% - filter((attention_all_correct == TRUE) & (followed_VGinstructions == "Yes")) %>% + dplyr::filter((attention_all_correct == TRUE) & (followed_VGinstructions == "Yes")) %>% nrow() # Demographics (Age, number of gender identities, the
number of egender ) s2_age_mean <- s2_df %>% - filter((attention_all_correct == TRUE) & (followed_VGinstructions == "Yes")) %>% + dplyr::filter((attention_all_correct == TRUE) & (followed_VGinstructions == "Yes")) %>% dplyr::summarize(m = mean(Age) %>% round(2), sd = sd(Age)) %>% - mutate(msd = to_msd(m, sd)) + dplyr::mutate(msd = to_msd(m, sd)) # Gender identity s2_gender_id <- s2_df %>% - group_by(Gender_Identity_3GP) %>% - summarise(n = n()) %>% + dplyr::group_by(Gender_Identity_3GP) %>% + dplyr::summarise(n = dplyr::n()) %>% deframe_as_list() # Racial Identity (8 groups) s2_race8_id <- s2_df %>% - group_by(Race_8GP) %>% - summarise(n = n()) %>% + dplyr::group_by(Race_8GP) %>% + dplyr::summarise(n = dplyr::n()) %>% deframe_as_list() # Racial identity (all questions) -s2_race_multi <- s2_df %>% summarise(across(starts_with("Race"), ~ is.na(.) %>% sum())) +s2_race_multi <- s2_df %>% dplyr::summarise(across(starts_with("Race"), ~ is.na(.) %>% sum())) ``` ### Participants @@ -201,7 +207,7 @@ I excluded any participants who did not complete the entire study or failed the # Main Analysis t-test t_results <- s2_df %>% # Include only those who passed the attention check - filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% + dplyr::filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% # t-test comparing the T2 belonging between the essay conditions t.test( formula = T2_Heart_1 ~ essay_condition, @@ -211,7 +217,7 @@ t_results <- s2_df %>% # Cohen's d - 90%CI main_cohens_d_95 <- s2_df %>% # Include only those who passed the attention check - filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% + dplyr::filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% effectsize::cohens_d( x = T2_Heart_1 ~ essay_condition, data = ., ci = .95 @@ -220,7 +226,7 @@ main_cohens_d <- s2_df %>% # Cohen's d - 90%CI main_cohens_d <- s2_df %>% # Include only those who passed the attention check -
filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% + dplyr::filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% effectsize::cohens_d( x = T2_Heart_1 ~ essay_condition, data = ., ci = .90 @@ -262,7 +268,7 @@ heart2_txt <- append( # Plot plot_T2_Heart <- s2_df %>% # Only include people who got the attention check right - filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% + dplyr::filter(followed_VGinstructions == "Yes" & attention_all_correct == TRUE) %>% ggplot(aes( x = essay_condition, color = essay_condition, y = T2_Heart_1 @@ -298,7 +304,7 @@ Since the obtained *p*-value was greater than .05, I performed the two one-sided # Probing Effectiveness of Rejection Induction. # T-test effectivenss <- s2_df %>% - filter( + dplyr::filter( attention_all_correct == TRUE, followed_VGinstructions == "Yes", essay_condition == "Non-Social Surrogacy" ) %>% @@ -363,7 +369,7 @@ heart12_txt <- heart12_txt %>% # Plot among the non-social surrogate nonsurrogate_heart12 <- s2_df_long %>% - filter(essay_condition == "Non-Social Surrogacy") %>% + dplyr::filter(essay_condition == "Non-Social Surrogacy") %>% # Start the ggplot 2 (data from the above) ggplot(aes(x = Time, y = Heart, color = Time)) + # Violin, errorbar, datapoints @@ -386,7 +392,7 @@ nonsurrogate_heart12 # Parasocial Relationships Manipulation Check ------------------------------ # Chi-square test parasocial_MC_results <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% { chisq.test( x = .$essay_condition, @@ -395,10 +401,10 @@ parasocial_MC_results <- s2_df %>% } # Summary table parasocial_MC_results_df <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% - group_by(essay_condition_title, parasocial_MC_group) %>% - summarise(n = n()) %>% - mutate( + dplyr::filter(attention_all_correct == 
TRUE, followed_VGinstructions == "Yes") %>% + dplyr::group_by(essay_condition_title, parasocial_MC_group) %>% +dplyr::summarise(n =dplyr::n()) %>% + dplyr::mutate( prop_per_cond = n / sum(n), percent_per_cond = prop_per_cond * 100 %>% round(2) ) @@ -407,17 +413,17 @@ parasocial_MC_results_df <- s2_df %>% # Welch's t-test PSI_MC_results <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% t.test(formula = PSI ~ essay_condition, data = .) # Cohen's d PSI_d_95 <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% effectsize::cohens_d(x = PSI ~ essay_condition, data = ., ci = .95) # Plot plot_PSI <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% ggplot(aes(x = essay_condition, y = PSI, color = essay_condition)) + # Violin with errorbar default_violin + @@ -434,17 +440,17 @@ plot_PSI <- s2_df %>% # Single-Item Immersion Manipulation Check ----------------------------------- # Welch's t-test for Immersion Scale immersion_MC_results <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% t.test(formula = Single_Immersion ~ essay_condition, data = .) 
# Cohen's d immersion_d_95 <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% effectsize::cohens_d(x = Single_Immersion ~ essay_condition, data = ., ci = .95) # Plot plot_immersion <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% ggplot(aes(x = essay_condition, y = Single_Immersion, color = essay_condition)) + # Violin with errorbar default_violin + @@ -458,12 +464,12 @@ plot_immersion <- s2_df %>% # Narrative Engagement Scale Manipulation Check ----------------------------------- NE_MC_results <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% t.test(formula = Narrative_Engagement ~ essay_condition, data = .) # Plot plot_NE <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% ggplot(aes(x = essay_condition, y = Narrative_Engagement, color = essay_condition)) + # Violin with errorbar default_violin + @@ -478,12 +484,12 @@ plot_NE <- s2_df %>% # Narrative Engagement Scale Manipulation Check ----------------------------------- OTFWorld_MC_results <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% t.test(formula = OTF_Social_World ~ essay_condition, data = .) 
# Plot plot_OTFWorld <- s2_df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% ggplot(aes(x = essay_condition, y = OTF_Social_World, color = essay_condition)) + # Violin with errorbar default_violin + @@ -511,29 +517,29 @@ parasocial_MC_txt <- parasocial_MC_txt %>% parasocial_MC_txt$chisq_glance$p.value %>% round(2) ), nonsurrogacy_noint = parasocial_MC_results_df %>% - filter(essay_condition_title == "Non-Social Surrogacy" & + dplyr::filter(essay_condition_title == "Non-Social Surrogacy" & parasocial_MC_group == "No Interaction with NPC") %>% - pull(percent_per_cond) %>% round(2), +dplyr::pull(percent_per_cond) %>% round(2), nonsurrogacy_noPSR = parasocial_MC_results_df %>% - filter(essay_condition_title == "Non-Social Surrogacy" & + dplyr::filter(essay_condition_title == "Non-Social Surrogacy" & parasocial_MC_group == "No Parasocial Relationship with NPC") %>% - pull(percent_per_cond) %>% round(2), +dplyr::pull(percent_per_cond) %>% round(2), nonsurrogacy_yesPSR = parasocial_MC_results_df %>% - filter(essay_condition_title == "Non-Social Surrogacy" & + dplyr::filter(essay_condition_title == "Non-Social Surrogacy" & parasocial_MC_group == "Formed Parasocial Relationship with NPC") %>% - pull(percent_per_cond) %>% round(2), +dplyr::pull(percent_per_cond) %>% round(2), surrogacy_noint = parasocial_MC_results_df %>% - filter(essay_condition_title == "Social Surrogacy" & + dplyr::filter(essay_condition_title == "Social Surrogacy" & parasocial_MC_group == "No Interaction with NPC") %>% - pull(percent_per_cond) %>% round(2), +dplyr::pull(percent_per_cond) %>% round(2), surrogacy_noPSR = parasocial_MC_results_df %>% - filter(essay_condition_title == "Social Surrogacy" & + dplyr::filter(essay_condition_title == "Social Surrogacy" & parasocial_MC_group == "No Parasocial Relationship with NPC") %>% - 
pull(percent_per_cond) %>% round(2), +dplyr::pull(percent_per_cond) %>% round(2), surrogacy_yesPSR = parasocial_MC_results_df %>% - filter(essay_condition_title == "Social Surrogacy" & + dplyr::filter(essay_condition_title == "Social Surrogacy" & parasocial_MC_group == "Formed Parasocial Relationship with NPC") %>% - pull(percent_per_cond) %>% round(2) +dplyr::pull(percent_per_cond) %>% round(2) )) # PSI - APA Text @@ -596,10 +602,10 @@ OTF_txt <- list( #| label: fig-exp-mosaic # Plot - Mosaic Plot plot_para_chi <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% - mutate( + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::mutate( parasocial_MC_group_shortlabel = - case_when( + dplyr::case_when( parasocial_MC_group == "No Interaction with NPC" ~ "No Interaction", parasocial_MC_group == "No Parasocial Relationship with NPC" ~ "Interacted, but no PSR", parasocial_MC_group == "Formed Parasocial Relationship with NPC" ~ "Formed PSR" @@ -655,7 +661,7 @@ Note that the Inclusion of Self in the Other Scale [@aronInclusionOtherSelf1992] # t-test enjoyment_results <- s2_df %>% # Exclude attention fails - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% t.test( formula = Enjoyment ~ essay_condition, data = . 
@@ -678,7 +684,7 @@ enjoyment_txt <- list( # Plot enjoyment_plot <- s2_df %>% # Exclude attention fails - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% ggplot(aes(x = essay_condition, y = Enjoyment, color = essay_condition)) + # Default Violin default_violin + @@ -704,8 +710,8 @@ enjoyment_plot ```{r s2-manikins-across-time} # T-tests across manikin variables manikin_ttests <- s2_df %>% - filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% - summarise(across( + dplyr::filter(attention_all_correct == TRUE, followed_VGinstructions == "Yes") %>% +dplyr::summarise(across( starts_with(c("T1", "T2")), list(ttest = function(x) { list(t.test(x ~ essay_condition)) @@ -719,7 +725,7 @@ manikin_ttests <- s2_df %>% # Extract APA ttests manikin_ttests <- manikin_ttests %>% - mutate( + dplyr::mutate( tidy = map(value, ~ broom::tidy(.)), APA = map_chr(value, ~ describe.ttest(.)) ) %>% @@ -728,7 +734,7 @@ manikin_ttests <- manikin_ttests %>% # Two-way Mixed ANOVA Comparing Manikin Scores Across Time x Essay Conditions manikin_mixed_ANOVAs <- s2_df_long %>% - summarise(across( +dplyr::summarise(across( c("Heart", "Valence", "Arousal", "Dominance"), list( ANOVA = function(x) { @@ -748,7 +754,7 @@ manikin_mixed_ANOVAs <- s2_df_long %>% )) %>% pivot_longer(everything()) %>% # Conver to a format with one row = one variable - mutate( + dplyr::mutate( Variable = str_extract(name, "^[^_].+(?=_)"), Stat = str_match(name, "(?:[^_](?!_))+$") ) %>% @@ -760,11 +766,11 @@ manikin_mixed_ANOVAs <- s2_df_long %>% # Extract Relevant Stats from the anova object manikin_ANOVA_DT <- manikin_mixed_ANOVAs %>% - mutate(ANOVA = map(ANOVA, ~ as_tibble(.))) %>% + dplyr::mutate(ANOVA = map(ANOVA, ~ as_tibble(.))) %>% unnest(ANOVA) %>% - select(Variable:ges, -`p<.05`) %>% + dplyr::select(Variable:ges, -`p<.05`) %>% # Create the APA-compliant text - mutate(APA = sprintf( + 
dplyr::mutate(APA = sprintf( "_F_(%s,%s) = %s, _p_ %s, $\\eta^2_{G}$ %s", DFn, DFd, `F` %>% apastats::f.round(2), apastats::round.p(p), apastats::round.p(ges) @@ -775,7 +781,7 @@ get_manikin_ANOVA_APA <- function(..., d = manikin_ANOVA_DT) d[list(...), APA] # T-test Results manikin_ttests_DT <- manikin_mixed_ANOVAs %>% - select(Variable, pairTimed) %>% + dplyr::select(Variable, pairTimed) %>% unnest(pairTimed) # Function to Plot Manikin @@ -884,13 +890,13 @@ s2_mod_by_mc <- expand_grid( moderator = s2_mc_indices ) # Join labels -s2_mod_by_mc <- s2_mod_by_mc %>% left_join(s2_var_labels, by = c("moderator" = "varname")) +s2_mod_by_mc <- s2_mod_by_mc %>%left_join(s2_var_labels, by = c("moderator" = "varname")) # Turn off ordered factor s2_df_long <- s2_df_long %>% - mutate(parasocial_MC_group = fct_inorder(parasocial_MC_group, ordered = FALSE)) + dplyr::mutate(parasocial_MC_group = fct_inorder(parasocial_MC_group, ordered = FALSE)) # IOS, immersion to continuous s2_df_long <- s2_df_long %>% - mutate( + dplyr::mutate( IOS_num = as.character(IOS) %>% as.numeric(), Single_Immersion_num = Single_Immersion %>% as.character() %>% as.numeric() ) @@ -910,24 +916,24 @@ s2_df_long <- s2_df_long %>% # Mean-Centering the moderators s2_df_long <- s2_df_long %>% # group by time will actually grand-mean center since the moderators are between scales - group_by(Time) %>% - mutate(across(all_of(s2_mc_indices[-1]), + dplyr::group_by(Time) %>% + dplyr::mutate(across(all_of(s2_mc_indices[-1]), .fns = list(c = function(x) x - mean(x, na.rm = TRUE)) )) %>% - ungroup() + dplyr::ungroup() # add "_c" to moderator names s2_mod_by_mc <- s2_mod_by_mc %>% - mutate(moderator = case_when( + dplyr::mutate(moderator = dplyr::case_when( moderator == "parasocial_MC_group" ~ moderator, TRUE ~ paste0(moderator, "_c") )) # Prepare formula s2_mod_by_mc <- s2_mod_by_mc %>% - mutate(formula = paste0(outcome, " ~ ", moderator, " * essay_condition * Time + (1|PID)")) %>% - mutate(contrasts = case_when( + 
dplyr::mutate(formula = paste0(outcome, " ~ ", moderator, " * essay_condition * Time + (1|PID)")) %>% + dplyr::mutate(contrasts = dplyr::case_when( moderator == "parasocial_MC_group" ~ list( parasocial_MC_group = contr.sum, essay_condition = contr.sum, @@ -939,29 +945,29 @@ s2_mod_by_mc <- s2_mod_by_mc %>% ) %>% list() )) %>% # run mixed model - contrasts - mutate(model = map2(formula, contrasts, ~ lmer(formula = .x, data = s2_df_long, contrasts = .y))) %>% + dplyr::mutate(model = map2(formula, contrasts, ~ lmer(formula = .x, data = s2_df_long, contrasts = .y))) %>% # tidy the model results - mutate( + dplyr::mutate( tidy = map(model, broom.mixed::tidy), APA = map(model, ~ describe.glm(., dtype = 3)) ) # Output as a list s2_mod_mc_APA <- s2_mod_by_mc %>% - mutate(APA = map(APA, function(x) { + dplyr::mutate(APA = map(APA, function(x) { x %>% - select(eff, str) %>% + dplyr::select(eff, str) %>% deframe_as_list() })) %>% - select(outcome, moderator, APA) %>% + dplyr::select(outcome, moderator, APA) %>% # Group by outcome to perform deframe as list only among each group - group_by(outcome) %>% + dplyr::group_by(outcome) %>% # Create a named list of moderator and the APA results, grouped by outcome - mutate(APA = cur_data() %>% select(moderator, APA) %>% deframe_as_list()) %>% + dplyr::mutate(APA = cur_data() %>% dplyr::select(moderator, APA) %>% deframe_as_list()) %>% # nest so that each row will be outcome. 
data column is a tibble of moderator and APA nest(data = c(moderator, APA)) %>% # For each data tibble, deframe as list - mutate(data = map(data, ~ deframe_as_list(.))) %>% + dplyr::mutate(data = map(data, ~ deframe_as_list(.))) %>% # Deframe the entire tibble as list deframe_as_list() @@ -978,8 +984,8 @@ For the model with the Inclusion of the Other in Self scores as a moderator, the # Coefficients predicting Heart s2_mod_plots <- s2_mod_by_mc %>% - filter(outcome == "Heart") %>% - pull(model) %>% + dplyr::filter(outcome == "Heart") %>% +dplyr::pull(model) %>% map(plot_s2_moderation) ggarrange(plotlist = s2_mod_plots, ncol = 2, nrow = 4, labels = "AUTO") %>% annotate_figure( diff --git a/05-Study-3.qmd b/05-Study-3.qmd index 2572317..dcf643b 100644 --- a/05-Study-3.qmd +++ b/05-Study-3.qmd @@ -76,12 +76,21 @@ knitr::opts_chunk$set( ) # Load packages +library(dplyr) +library(tidyr) +library(purrr) +library(stringr) +library(forcats) +library(readr) +library(tibble) + + library(here) source(here("r", "namifunc.R")) # Read User-Defined Functions # Load Public Data ---------------------------------------- -s3_df <- read_rds(here("data_public", "Study3_public.rds")) %>% filter(Finished == 1) -s3_df_attentive <- s3_df %>% filter(attention_all_correct == TRUE) +s3_df <- read_rds(here("data_public", "Study3_public.rds")) %>% dplyr::filter(Finished == 1) +s3_df_attentive <- s3_df %>% dplyr::filter(attention_all_correct == TRUE) ``` ### Participants @@ -97,41 +106,41 @@ s3_df_msd <- s3_df_attentive %>% describe_by_factor(parasocial, social_world, ) ) %>% # Clean Names - rename_all(.funs = ~ str_replace(., "_1_", "_")) + rename_all(.funs = ~ stringr::str_replace(., "_1_", "_")) # Means and Standard Devisions for Each conditions s3_df_msd <- get_APA_from_msd(s3_df_msd) %>% data.table::setDT(key = c("parasocial", "social_world", "variable", "APA")) # Play time-Based Exclusion: R_3nN7adJevbEtL0R had more than 7000 minutes playing the game s3_playtime_exclusion <- s3_df %>% - 
filter(RPG_playtime > 7000) + dplyr::filter(RPG_playtime > 7000) # Play time Calculation s3_playtime <- s3_df_attentive %>% # One record taking more than 7000 minutes was removed from the data - filter(RPG_playtime < 7000) %>% - summarise( + dplyr::filter(RPG_playtime < 7000) %>% + dplyr::summarise( mean_playtime = mean(RPG_playtime, na.rm = TRUE) %>% round(2), sd_playtime = sd(RPG_playtime, na.rm = TRUE) %>% round(2) ) %>% - mutate(APA = to_msd(mean_playtime, sd_playtime)) %>% + dplyr::mutate(APA = to_msd(mean_playtime, sd_playtime)) %>% as.list() # Getting participants per condition s3_ntable <- s3_df %>% - group_by(parasocial, social_world) %>% - summarise(n = n()) %>% - mutate(key = str_c(str_extract(parasocial, "^."), str_extract(social_world, "^."))) + dplyr::group_by(parasocial, social_world) %>% + dplyr::summarise(n = dplyr::n()) %>% + dplyr::mutate(key = str_c(str_extract(parasocial, "^."), str_extract(social_world, "^."))) s3_n_txt <- s3_ntable %>% - ungroup() %>% - select(key, n) %>% - deframe() %>% + dplyr::ungroup() %>% + dplyr::select(key, n) %>% + tibble::deframe() %>% as.list() %>% append(list(all_participated = s3_ntable$n %>% sum())) # Exclusions by Attention Check s3_attention_df <- s3_df %>% - group_by(attention_all_correct) %>% - summarise(n = n()) %>% + dplyr::group_by(attention_all_correct) %>% + dplyr::summarise(n = dplyr::n()) %>% deframe_as_list() s3_atteion_by_group <- s3_df %>% @@ -142,12 +151,12 @@ s3_atteion_by_group <- s3_df %>% attention_all = !attention_rejection_correct & !attention_parasocial_correct & !attention_social_world_correct ) s3_attntion_by_type <- s3_atteion_by_group %>% - summarise(across(everything(), sum)) %>% + dplyr::summarise(across(everything(), sum)) %>% as.list() s3_attention_by_type <- s3_df %>% - filter(Finished == 1) %>% - summarise(across( + dplyr::filter(Finished == 1) %>% + dplyr::summarise(across( c( attention_rejection_correct, attention_parasocial_correct, attention_social_world_correct @@ -156,8 +165,8 @@
s3_attention_by_type <- s3_df %>% )) ## Excluded Participants s3_n_excluded <- list( - all = s3_df %>% filter(Finished == 1) %>% - summarise(sum(!attention_all_correct)) %>% deframe(), + all = s3_df %>% dplyr::filter(Finished == 1) %>% +dplyr::summarise(sum(!attention_all_correct)) %>% tibble::deframe(), HH = NA, HL = NA, LH = NA, @@ -165,11 +174,11 @@ s3_n_excluded <- list( ) # Get n and percent by coditions s3_excluded <- s3_df %>% - filter(!(attention_all_correct == TRUE)) %>% + dplyr::filter(!(attention_all_correct == TRUE)) %>% get_n_pct(parasocial, social_world) # Create a df with included participants -s3_included <- s3_df %>% filter(attention_all_correct == TRUE) +s3_included <- s3_df %>% dplyr::filter(attention_all_correct == TRUE) # Gender and Race ------------------------------------------------------ # Get the n and percent lists from the user-defined function @@ -394,12 +403,12 @@ s3_att_v3change_date <- lubridate::as_datetime("2021-03-24 11:45:00") # ge the number of participants after this date s3_att <- list( - v1 = s3_df %>% filter(StartDate < s3_att_v2change_date) %>% nrow(), - v2 = s3_df %>% filter( + v1 = s3_df %>% dplyr::filter(StartDate < s3_att_v2change_date) %>% nrow(), + v2 = s3_df %>% dplyr::filter( StartDate > s3_att_v2change_date, StartDate < s3_att_v3change_date ) %>% nrow(), - v3 = s3_df %>% filter(StartDate > s3_att_v3change_date) %>% nrow() + v3 = s3_df %>% dplyr::filter(StartDate > s3_att_v3change_date) %>% nrow() ) ``` @@ -441,7 +450,7 @@ completed this final version. 
# Main Analysis - Main ANOVA Model ANOVA_lm <- s3_df %>% # Include only those who passed the attention check - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% # 2 (Parasocial) x 2 (Social World) ANOVA on Heart Manikin Scores at Time 2 lm( formula = T2_Heart_1 ~ parasocial * social_world, @@ -455,13 +464,13 @@ ANOVA_EM <- emmeans(ANOVA_lm, pairwise ~ parasocial:social_world, level = .90) ANOVA_EM_tibble <- ANOVA_EM$contrasts %>% as_tibble() # Add APA Stats Emeans ANOVA_EM_tibble <- ANOVA_EM_tibble %>% - mutate(APA = ANOVA_EM_tibble %>% describe.emmeans(dtype = "t")) + dplyr::mutate(APA = ANOVA_EM_tibble %>% describe.emmeans(dtype = "t")) # get a list of APA s3_contratsts_APA <- ANOVA_EM_tibble %>% - select(contrast, APA) %>% + dplyr::select(contrast, APA) %>% # abbreviate the contrast labels - mutate(contrast = abbreviate(contrast)) %>% - deframe() %>% + dplyr::mutate(contrast = abbreviate(contrast)) %>% + tibble::deframe() %>% as.list() @@ -483,20 +492,20 @@ ANOVA_cohen <- eff_size(ANOVA_EM, sigma(ANOVA_lm), df.residual(ANOVA_lm), ANOVA_cohen_tibble <- ANOVA_cohen %>% as_tibble() # Shortened Labels for forestplot ANOVA_cohen_tibble <- ANOVA_cohen_tibble %>% - mutate( + dplyr::mutate( labels = str_split(contrast, " - "), label1 = map_chr(labels, ~ .[[1]]), label2 = map_chr(labels, ~ .[[2]]) ) %>% - mutate(across(c(label1, label2), ~ str_remove(., "\\(|\\)"))) %>% - mutate(across(c(label1, label2), ~ str_replace(., "Parasocial", "Para"))) %>% - mutate(across(c(label1, label2), ~ str_replace(., "Social World", "Social"))) %>% - mutate( + dplyr::mutate(across(c(label1, label2), ~ str_remove(., "\\(|\\)"))) %>% + dplyr::mutate(across(c(label1, label2), ~ stringr::str_replace(., "Parasocial", "Para"))) %>% + dplyr::mutate(across(c(label1, label2), ~ stringr::str_replace(., "Social World", "Social"))) %>% + dplyr::mutate( plot_label = paste(label1, label2, sep = " - "), key = abbreviate(plot_label) ) %>% # APA-formatted results 
- mutate(APA = sprintf( + dplyr::mutate(APA = sprintf( "_d_ = %s, 90%%CI [%s, %s]", effect.size %>% f.round(), lower.CL %>% f.round(), upper.CL %>% f.round() )) @@ -525,8 +534,8 @@ s3_cohensd_forestplot <- ANOVA_cohen_tibble %>% # APA list for Study 3 results ANOVA_summary_df <- ANOVA_summary %>% broom::tidy() s3_results_main <- list( - parasocial = ANOVA_summary_df %>% filter(term == "parasocial"), - social_world = ANOVA_summary_df %>% filter(term == "social_world"), + parasocial = ANOVA_summary_df %>% dplyr::filter(term == "parasocial"), + social_world = ANOVA_summary_df %>% dplyr::filter(term == "social_world"), df_res = ANOVA_summary$Df[[5]] # alternatively, use `tail(vector, n = 1)` ) @@ -537,21 +546,21 @@ s3_ANOVA_APA <- s3_ANOVA_APA %>% as.list() # Cohen's d for the main effects (Parascoial and Social World) s3_main_effects <- tibble(predictor = c("parasocial", "social_world")) %>% - mutate(formula = sprintf("T2_Heart_1 ~ %s", predictor)) %>% - mutate(ci90 = map(formula, ~ effectsize::cohens_d(x = formula(.), data = s3_df, ci = .90))) %>% - mutate(ci95 = map(formula, ~ effectsize::cohens_d(x = formula(.), data = s3_df, ci = .95))) %>% - mutate(equivalence = map(ci90, ~ equivalence_test(., range = c(-Cohen_d_target, Cohen_d_target)))) %>% - mutate(APA = map_chr(ci90, describe_d_ci)) %>% + dplyr::mutate(formula = sprintf("T2_Heart_1 ~ %s", predictor)) %>% + dplyr::mutate(ci90 = map(formula, ~ effectsize::cohens_d(x = formula(.), data = s3_df, ci = .90))) %>% + dplyr::mutate(ci95 = map(formula, ~ effectsize::cohens_d(x = formula(.), data = s3_df, ci = .95))) %>% + dplyr::mutate(equivalence = map(ci90, ~ equivalence_test(., range = c(-Cohen_d_target, Cohen_d_target)))) %>% + dplyr::mutate(APA = map_chr(ci90, describe_d_ci)) %>% unnest(c(ci90, ci95), .sep = "_") # APA results s3_main_effects_APA <- s3_main_effects %>% - select(predictor, APA) %>% - deframe() %>% + dplyr::select(predictor, APA) %>% + tibble::deframe() %>% as.list() # Get the cohens'd and the 90% 
ci for APA s3_contratsts_d_APA <- ANOVA_cohen_tibble %>% - select(key, APA) %>% - deframe() %>% + dplyr::select(key, APA) %>% + tibble::deframe() %>% as.list() # Combine the t-test results and Cohen's d s3_contrats_td_APA <- paste(s3_contratsts_APA, s3_contratsts_d_APA, sep = ", ") @@ -594,7 +603,7 @@ equivalent to zero. # 2 x 2 ANOVA on Time 2 belonging s3_heart2_plot <- s3_included %>% # Only include people who got the attention check right - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% ggplot(aes( x = parasocial, color = social_world, y = T2_Heart_1 @@ -652,7 +661,7 @@ ggarrange(s3_heart2_plot, # Parasocial Relationships Manipulation Check ------------------------------ # Chi-square test parasocial_MC_results <- s3_df %>% - filter(attention_all_correct == TRUE) %>% # Exclude those failed attention check + dplyr::filter(attention_all_correct == TRUE) %>% # Exclude those failed attention check { chisq.test( x = .$parasocial, @@ -662,11 +671,11 @@ parasocial_MC_results <- s3_df %>% # Single-Item Immersion Manipulation Check ----------------------------------- # Welch's t-test for Immersion Scale immersion_MC_results <- s3_df %>% - filter(attention_all_correct == TRUE) %>% # Exclude those failed attention check + dplyr::filter(attention_all_correct == TRUE) %>% # Exclude those failed attention check t.test(formula = Single_Immersion ~ social_world, data = .) # OTF Social World Scale Manipulation Check ----------------------------------- OTFWorld_MC_results <- s3_df %>% - filter(attention_all_correct == TRUE) %>% # Exclude those failed attention check + dplyr::filter(attention_all_correct == TRUE) %>% # Exclude those failed attention check t.test(formula = OTF_Social_World ~ social_world, data = .) # APA text for the exploratyr manipulation check s3_manicheck_APA <- list( @@ -711,11 +720,11 @@ effectiveness of the manipulation based on these analysis. 
# Parasocial Relationship Manipulation Check plot s3_parasocial_plot <- s3_df %>% - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% # Modify labels - mutate( + dplyr::mutate( parasocial_MC_group_shortlabel = - case_when( + dplyr::case_when( parasocial_MC_group == "No Interaction with NPC" ~ "No Interaction", parasocial_MC_group == "No Parasocial Relationship with NPC" ~ "Interacted, but no PSR", parasocial_MC_group == "Formed Parasocial Relationship with NPC" ~ "Formed PSR" @@ -774,12 +783,12 @@ ggarrange(s3_parasocial_plot, ```{r s3-enjoyment-analysis} s3_enjoyment_fit <- s3_df %>% - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% lm(formula = Enjoyment ~ parasocial * social_world) s3_enjoyment_Anova <- s3_enjoyment_fit %>% Anova(type = 3) s3_enjoyment_APA_df <- summarize_Anova_with_APA(s3_enjoyment_fit) s3_enjoyment_txt <- s3_enjoyment_APA_df %>% - select(term, APA) %>% + dplyr::select(term, APA) %>% deframe_as_list() ``` @@ -810,11 +819,11 @@ moderation_race_lm <- s3_df %>% ### Gender # emmeans(moderation_gender_lm, pairwise ~ parasocial:social_world:Gender_Identity_3GP, level = .90) moderation_gender_APA <- describe.glm(moderation_gender_lm, dtype = 3) %>% - select(eff, str) %>% + dplyr::select(eff, str) %>% deframe_as_list() ### Race moderation_race_APA <- describe.glm(moderation_race_lm, dtype = 3) %>% - select(eff, str) %>% + dplyr::select(eff, str) %>% deframe_as_list() ``` @@ -852,9 +861,9 @@ social world were not moderated by gender or racial identities. 
# Moderation by Gender - Plot ---------------------------------------------------- s3_mod_by_gender_plot <- s3_df %>% # Exclude those who failed attention check - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% # Exclude people identifying other than male or female (since htere are too few) - filter(Gender_Identity_3GP != "Other") %>% + dplyr::filter(Gender_Identity_3GP != "Other") %>% ggplot(aes( x = parasocial, y = T2_Heart_1, @@ -904,7 +913,7 @@ s3_mod_by_gender_plot <- s3_df %>% # Visualize s3_mod_by_race_plot <- s3_df %>% # Exclude those who failed attention check - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% ggplot(aes( x = parasocial, y = T2_Heart_1, @@ -959,10 +968,10 @@ ggarrange(s3_mod_by_gender_plot, ```{r s3-manikins-across-time} # Create a long dataset for all manikin variables s3_df_long <- s3_df %>% - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% pivot_longer(cols = c(starts_with("T1"), starts_with("T2"))) %>% - mutate(name = str_remove(name, "_1")) %>% - mutate( + dplyr::mutate(name = str_remove(name, "_1")) %>% + dplyr::mutate( time = str_extract(name, "T\\d"), # Get the name followed by _ repvarname = str_extract(name, "(?:[^_](?!_))+$") @@ -977,7 +986,7 @@ s3_df_long <- s3_df %>% # Mixed Anova model s3_manikins_time <- tibble(outcome = c("Heart", "Valence", "Arousal", "Dominance")) %>% - mutate( + dplyr::mutate( fit = map( outcome, ~ rstatix::anova_test( @@ -991,24 +1000,24 @@ s3_manikins_time <- tibble(outcome = c("Heart", "Valence", "Arousal", "Dominance }) ) %>% # Go through the tibble to create the APA -compatible string - mutate(APA = map(APA, describe_anova_test)) + dplyr::mutate(APA = map(APA, describe_anova_test)) # Time meansd s3_time_msd <- s3_df_long %>% describe_by_factor(time, vars = c(Heart, Valence, Arousal, Dominance)) %>% get_APA_from_msd() ## APA for Tiem comparisons s3_time_APA <- 
s3_time_msd %>% - mutate(label = str_c(time, variable)) %>% - select(label, APA) %>% + dplyr::mutate(label = str_c(time, variable)) %>% + dplyr::select(label, APA) %>% deframe_as_list() # APA s3_manikins_time_list <- s3_manikins_time %>% - mutate(APA_def = map(APA, function(x) { + dplyr::mutate(APA_def = map(APA, function(x) { x %>% - select(Effect, APA) %>% + dplyr::select(Effect, APA) %>% deframe_as_list() })) %>% - select(outcome, APA_def) %>% + dplyr::select(outcome, APA_def) %>% deframe_as_list() ``` @@ -1089,12 +1098,12 @@ ggarrange(draw_s3_manikin_plot(Heart), ```{r s3-identification} # identification s3_identification_lm <- s3_df %>% - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% lm(formula = PC_Identification ~ parasocial * social_world) ## Add APA-Style Texts s3_identification_summary <- summarize_Anova_with_APA(s3_identification_lm) s3_identification_APA <- s3_identification_summary %>% - select(term, APA) %>% + dplyr::select(term, APA) %>% deframe_as_list() ``` @@ -1140,13 +1149,13 @@ s3_mod_by_mc <- expand_grid( moderator = s3_mc_indices ) # Join labels -s3_mod_by_mc <- s3_mod_by_mc %>% left_join(s3_var_labels, by = c("moderator" = "varname")) +s3_mod_by_mc <- s3_mod_by_mc %>% left_join(s3_var_labels, by = c("moderator" = "varname")) # Turn off ordered factor s3_df_long <- s3_df_long %>% - mutate(parasocial_MC_group = fct_inorder(parasocial_MC_group, ordered = FALSE)) + dplyr::mutate(parasocial_MC_group = fct_inorder(parasocial_MC_group, ordered = FALSE)) # IOS, immersion to continuous s3_df_long <- s3_df_long %>% - mutate( + dplyr::mutate( IOS = as.character(IOS) %>% as.numeric(), Single_Immersion = Single_Immersion %>% as.character() %>% as.numeric() ) @@ -1165,14 +1174,14 @@ s3_df_long <- s3_df_long %>% # Mean-Centering the moderators s3_df_long <- s3_df_long %>% # group by time will actually grand-mean center since the moderators are between scales -
mutate(across(all_of(s3_mc_indices[-1]), + dplyr::group_by(time) %>% + dplyr::mutate(across(all_of(s3_mc_indices[-1]), .fns = list(c = function(x) x - mean(x, na.rm = TRUE)) )) %>% - ungroup() + dplyr::ungroup() # add "_c" to centered moderator s3_mod_by_mc <- s3_mod_by_mc %>% - mutate(moderator = case_when( + dplyr::mutate(moderator = dplyr::case_when( moderator == "parasocial_MC_group" ~ moderator, TRUE ~ paste0(moderator, "_c") )) @@ -1180,8 +1189,8 @@ s3_mod_by_mc <- s3_mod_by_mc %>% # Prepare formula s3_mod_by_mc <- s3_mod_by_mc %>% - mutate(formula = paste0(outcome, " ~ ", moderator, " * parasocial * social_world * time + (1|ResponseId)")) %>% - mutate(contrasts = case_when( + dplyr::mutate(formula = paste0(outcome, " ~ ", moderator, " * parasocial * social_world * time + (1|ResponseId)")) %>% + dplyr::mutate(contrasts = dplyr::case_when( moderator == "parasocial_MC_group" ~ list( parasocial_MC_group = contr.sum, social_world = contr.sum, @@ -1195,36 +1204,36 @@ s3_mod_by_mc <- s3_mod_by_mc %>% ) %>% list() )) %>% # run mixed model - mutate(model = map2(formula, contrasts, ~ lmer(formula = .x, data = s3_df_long, contrasts = .y))) %>% + dplyr::mutate(model = map2(formula, contrasts, ~ lmer(formula = .x, data = s3_df_long, contrasts = .y))) %>% # tidy the model results - mutate( + dplyr::mutate( tidy = map(model, broom.mixed::tidy), APA = map(model, ~ describe.glm(., dtype = 3)) ) # Output as a list s3_mod_mc_APA <- s3_mod_by_mc %>% - mutate(APA = map(APA, function(x) { + dplyr::mutate(APA = map(APA, function(x) { x %>% - select(eff, str) %>% + dplyr::select(eff, str) %>% deframe_as_list() })) %>% - select(outcome, moderator, APA) %>% + dplyr::select(outcome, moderator, APA) %>% # Group by outcome to perform deframe as list only among each group - group_by(outcome) %>% + dplyr::group_by(outcome) %>% # Create a named list of moderator and the APA results, grouped by outcome - mutate(APA = cur_data() %>% select(moderator, APA) %>% deframe_as_list()) %>% + 
dplyr::mutate(APA = cur_data() %>% dplyr::select(moderator, APA) %>% deframe_as_list()) %>% # nest so that each row will be outcome. data column is a tibble of moderator and APA nest(data = c(moderator, APA)) %>% # For each data tibble, deframe as list - mutate(data = map(data, ~ deframe_as_list(.))) %>% + dplyr::mutate(data = map(data, ~ deframe_as_list(.))) %>% # Deframe the entire tibble as list deframe_as_list() # Plot s3_mod_plots <- s3_mod_by_mc %>% - filter(outcome == "Heart") %>% - pull(model) %>% + dplyr::filter(outcome == "Heart") %>% +dplyr::pull(model) %>% map(plot_s3_mod_bymc) ``` diff --git a/08-Appendix.qmd b/08-Appendix.qmd index 194bad4..d83fff5 100644 --- a/08-Appendix.qmd +++ b/08-Appendix.qmd @@ -7,7 +7,14 @@ knitr::opts_chunk$set( warning = FALSE ) # Load package -library(tidyverse) +library(dplyr) +library(tidyr) +library(purrr) +library(stringr) +library(forcats) +library(readr) +library(tibble) + library(codebook) # to generate codebook library(here) library(car) # for the type III SS in ANOVA @@ -199,7 +206,7 @@ s1a_mnsdcor %>% as_tibble() %>% ```{r appendix-s1b-correlations-table} # N, Mean, SD Table -s1b_mnsdcor <- s1b_target_vars %>% select(-id, -visit) %>% get_nmsd_cor_table() +s1b_mnsdcor <- s1b_target_vars %>% dplyr::select(-id, -visit) %>% get_nmsd_cor_table() # Footnote for the table s1b_footnote <- paste(sep = " ", @@ -297,9 +304,9 @@ I explored whether the heart manikin scores changed across time by condition in ```{r appendix-s1c-mixed} s1c_sensitivity_mixed_df <- tibble(outcome = c("heart", "valence")) %>% - mutate(formula = paste0(outcome, "~ grouping_dummy * time + (1|id)")) %>% - mutate(model = map(formula, ~lmer(formula = ., data = s1c_df_long))) %>% - mutate(tidy = map(model, ~broom.mixed::tidy(.)), + dplyr::mutate(formula = paste0(outcome, "~ grouping_dummy * time + (1|id)")) %>% + dplyr::mutate(model = map(formula, ~lmer(formula = ., data = s1c_df_long))) %>% + dplyr::mutate(tidy = map(model, ~broom.mixed::tidy(.)), APA = 
map(model, ~describe.glm(.))) ``` @@ -476,7 +483,7 @@ I explored whether participants reported different levels of belonging across ti #| label: fig-appendix-s1e-belonging-plot # s1e Plot - Heart by Condition across time s1e_heart_plot <- s1e_df_long %>% - mutate(across(c(rejection, confederate_desire), to_factor)) %>% + dplyr::mutate(across(c(rejection, confederate_desire), to_factor)) %>% ggplot(aes(x = time, y = heart, color = paste(rejection, confederate_desire), group = paste(rejection, confederate_desire))) + # Plot Data Points @@ -514,11 +521,11 @@ NTS_vars <- c("nts_belonging_mean", "nts_esteem_mean", "nts_control_mean", "nts_meaning_mean", "nts_mean") s1e_rejection_NTS_df <- tibble(outcome = NTS_vars) %>% - mutate(formula = (paste0(outcome, " ~ desires_dummy * rejection * time + (1|id)"))) %>% - mutate(fit = map(formula, ~lmer(formula = as.formula(.), + dplyr::mutate(formula = (paste0(outcome, " ~ desires_dummy * rejection * time + (1|id)"))) %>% + dplyr::mutate(fit = map(formula, ~lmer(formula = as.formula(.), data = s1e_df_long))) %>% - mutate(tidy = map(fit, broom::tidy)) %>% - mutate(described = map(fit, describe.glm)) + dplyr::mutate(tidy = map(fit, broom::tidy)) %>% + dplyr::mutate(described = map(fit, describe.glm)) # Create Plots ## caption @@ -606,21 +613,21 @@ s1e_NTS_T5_vars <- c("nts_T5_belonging_mean", "nts_T5_esteem_mean", "nts_T5_mean") s1e_NTS_moderation_by_esteem_df <- tibble(outcome = s1e_NTS_T5_vars) %>% - mutate(formula = (paste0(outcome, " ~ desires_dummy * rejection * esteem_mean"))) %>% - mutate(fit = map(formula, ~lm(formula = as.formula(.), + dplyr::mutate(formula = (paste0(outcome, " ~ desires_dummy * rejection * esteem_mean"))) %>% + dplyr::mutate(fit = map(formula, ~lm(formula = as.formula(.), data = s1e_df))) %>% - mutate(tidy = map(fit, tidy)) %>% - mutate(described = map(fit, describe.glm)) %>% - mutate(plot = map2(.x = fit, .y = outcome, ~plot_fit(.x, .y))) + dplyr::mutate(tidy = map(fit, tidy)) %>% + dplyr::mutate(described 
= map(fit, describe.glm)) %>% + dplyr::mutate(plot = map2(.x = fit, .y = outcome, ~plot_fit(.x, .y))) s1e_NTS_moderation_by_esteem_nocond_df <- tibble(outcome = s1e_NTS_T5_vars) %>% - mutate(labels = c("Belonging (T5)", "Self-Esteem (T5)", "Control (T5)", "Meaningful Existence (T5)", "Overall NTS (T5)")) %>% - mutate(formula = (paste0(outcome, " ~ rejection * esteem_mean"))) %>% - mutate(fit = map(formula, ~lm(formula = as.formula(.), + dplyr::mutate(labels = c("Belonging (T5)", "Self-Esteem (T5)", "Control (T5)", "Meaningful Existence (T5)", "Overall NTS (T5)")) %>% + dplyr::mutate(formula = (paste0(outcome, " ~ rejection * esteem_mean"))) %>% + dplyr::mutate(fit = map(formula, ~lm(formula = as.formula(.), data = s1e_df))) %>% - mutate(tidy = map(fit, tidy)) %>% - mutate(described = map(fit, describe.glm)) %>% - mutate(plot = map2(fit, labels, function(x = .x, title = .y){ + dplyr::mutate(tidy = map(fit, tidy)) %>% + dplyr::mutate(described = map(fit, describe.glm)) %>% + dplyr::mutate(plot = map2(fit, labels, function(x = .x, title = .y){ sjPlot::plot_model(x, terms = c("esteem_mean", "rejection"), type = "pred") + labs(title = title, x = "Self-Esteem", y = title) })) @@ -668,8 +675,8 @@ s2_continuous_predictors <- c("IOS" = "IOS", "PSI" = "PSI", s2_target_vars <- c(s2_outcomes, s2_continuous_predictors) # Create a target columns-only df with renamed columns via `select` s2_target_cols <- s2_df %>% - filter(attention_all_correct, followed_VGinstructions == "Yes") %>% - select(all_of(s2_target_vars)) + dplyr::filter(attention_all_correct, followed_VGinstructions == "Yes") %>% + dplyr::select(all_of(s2_target_vars)) # N, Mean, SD Table s2_mnsdcor <- s2_target_cols %>% get_nmsd_cor_table() # Render @@ -685,14 +692,15 @@ s2_mnsdcor %>% as_tibble() %>% #### Bivariate Scatter Plot Matrix ```{r s2-matplot, fig.cap = "Matrix Plot for Study 2 Variables", fig.height=15, fig.width=15, message=FALSE, warning=FALSE, cache=TRUE} +#| label: fig-s2-matplot # Study 2 - Matrix 
Plot s2_df %>% - select(essay_condition, T1_Heart_1, T1_Valence_1, T1_Arousal_1, T1_Dominance_1, + dplyr::select(essay_condition, T1_Heart_1, T1_Valence_1, T1_Arousal_1, T1_Dominance_1, T2_Heart_1, T2_Valence_1, T2_Arousal_1, T2_Dominance_1, IOS, PSI, Single_Immersion,Narrative_Engagement, OTF_Social_World, Enjoyment) %>% # Rename variables - rename("Essay" = essay_condition, "T1 Heart" = T1_Heart_1, "T1 Valence" = T1_Valence_1, +dplyr::rename("Essay" = essay_condition, "T1 Heart" = T1_Heart_1, "T1 Valence" = T1_Valence_1, "T1 Arousal" = T1_Arousal_1, "T1 Dominance" = T1_Dominance_1, "T2 Heart" = T2_Heart_1, "T2 Valence" = T2_Valence_1, "T2 Arousal" = T2_Arousal_1, "T2 Dominance" = T2_Dominance_1, @@ -709,16 +717,16 @@ s2_df %>% ## Word Cloud # create words df - one row = a game title s2_game_names <- s2_df %>% - filter(followed_VGinstructions == "Yes") %>% + dplyr::filter(followed_VGinstructions == "Yes") %>% unnest_tokens(word, target_game_name_cl, token = "lines", to_lower = FALSE) %>% - group_by(essay_condition) %>% count(word, sort = TRUE) %>% - mutate(proportion = n/sum(n)) + dplyr::group_by(essay_condition) %>% count(word, sort = TRUE) %>% + dplyr::mutate(proportion = n/sum(n)) set.seed(2021) # Word cloud for Social Surrogacy Condition s2_game_names %>% - filter(essay_condition == "Social Surrogacy") %>% + dplyr::filter(essay_condition == "Social Surrogacy") %>% with(wordcloud(word, n, rot.per = 0, min.freq = 1, scale = c(4, .8), random.color = TRUE, colors = brewer.pal(5, "Set2"))) @@ -727,7 +735,7 @@ s2_game_names %>% ```{r s2-word-cloud-nonsurrogate, fig.cap='Word Cloud for Game Titles for the Non-Social Surrogate Condition', fig.width=10, fig.height=9} # Word cloud for Non-Social Surrogacy Condition s2_game_names %>% - filter(essay_condition == "Non-Social Surrogacy") %>% + dplyr::filter(essay_condition == "Non-Social Surrogacy") %>% with(wordcloud(word, n, rot.per = 0, min.freq = 1, scale = c(4, .8), random.color = TRUE, colors = brewer.pal(5, "Set2"))) 
@@ -800,13 +808,13 @@ Words such as "character" and "story" appeared more frequently in the social sur # Load Public text file - Anonymous s2_VG_txt <- read_rds(here("data_public", "Study2_VG_txt.rds")) # Convert to wide, one row is now condition x word -s2_VG_txt_wide <- s2_VG_txt %>% select(-n) %>% +s2_VG_txt_wide <- s2_VG_txt %>% dplyr::select(-n) %>% pivot_wider(names_from = essay_condition, values_from = proportion) # Create a frequency plot s2_VG_word_freq_plot <- s2_VG_txt %>% - filter(n > 50) %>% - mutate(word = reorder(word, n)) %>% + dplyr::filter(n > 50) %>% + dplyr::mutate(word = reorder(word, n)) %>% ggplot(aes(n, word)) + geom_col() + labs(y = NULL) + @@ -819,7 +827,7 @@ s2_VG_word_freq_plot <- s2_VG_txt %>% #| # expect a warning about rows with missing values being removed s2_VG_txt_wide %>% - filter(!str_detect(word, "\\d+")) %>% + dplyr::filter(!str_detect(word, "\\d+")) %>% ggplot(aes(x = `Non-Social Surrogacy`, y = `Social Surrogacy`)) + geom_abline(color = "gray40", lty = 2) + geom_jitter(alpha = 0.1, size = 2.5, width = 0.3, height = 0.3) + @@ -850,14 +858,14 @@ s2_exit_q_summary <- s2_df %>% ```{r s2-exit-questions-plot, fig.cap='Study 2 - Lengths of Participant Answers to Exit Questions Across Question Order'} #| label: fig-s2-exit-questions-plot # Share anything -s2_share_anything_plot <- s2_df %>% filter(!is.na(Exit_Q_Order)) %>% +s2_share_anything_plot <- s2_df %>% dplyr::filter(!is.na(Exit_Q_Order)) %>% ggplot(aes(y = Share_Anything_Len, x = Exit_Q_Order, color = Exit_Q_Order)) + default_violin + ylab("Answer to 'Share Anything' Question\n(Number of Characters)") + xlab("Question Order") + labs(color = "Question Order") # Study Purpose -s2_study_purpose_plot <- s2_df %>% filter(!is.na(Exit_Q_Order)) %>% +s2_study_purpose_plot <- s2_df %>% dplyr::filter(!is.na(Exit_Q_Order)) %>% ggplot(aes(y = Study_Purpose_Len, x = Exit_Q_Order, color = Exit_Q_Order)) + default_violin + ylab("Answer to 'Study Purpose' Question\n(Number of Characters)") + 
@@ -883,7 +891,7 @@ s3_continuous_predictors <- c("IOS" = "IOS", "Immersion" = "Single_Immersion", "Enjoyment" = "Enjoyment") s3_target_vars <- c(s3_outcomes, s3_continuous_predictors) # Target columns -s3_target_cols <- s3_df %>% filter(attention_all_correct) %>% select(all_of(s3_target_vars)) +s3_target_cols <- s3_df %>% dplyr::filter(attention_all_correct) %>% dplyr::select(all_of(s3_target_vars)) # N, Mean, SD Table s3_nmsd_table <- s3_target_cols %>% summarise_descriptives() # Correlation Table with stars @@ -901,15 +909,16 @@ s3_mnsdcor %>% as_tibble() %>% ``` ```{r appendix-s3-ggpairs, fig.cap='Study 3 - Bivariate Scatter Plot Matrix',fig.height=15, fig.width=15, out.width='100%', message=FALSE, warning=FALSE, cache=TRUE} +#| label: fig-appendix-s3-ggpairs # GG Pairs for Study 3 s3_df %>% - select(parasocial, social_world, T1_Heart_1, T1_Valence_1, T1_Arousal_1, T1_Dominance_1, + dplyr::select(parasocial, social_world, T1_Heart_1, T1_Valence_1, T1_Arousal_1, T1_Dominance_1, T2_Heart_1, T2_Valence_1, T2_Arousal_1, T2_Dominance_1, IOS, Single_Immersion, OTF_Social_World, PC_Identification) %>% # Rename variables - rename("Parasocial" = parasocial, "Social World" = social_world, +dplyr::rename("Parasocial" = parasocial, "Social World" = social_world, "Heart T1" = T1_Heart_1, "Valence T1" = T1_Valence_1, "Arousal T1" = T1_Arousal_1, "Dominance T1" = T1_Dominance_1, "Heart T2" = T2_Heart_1, "Valence T2" = T2_Valence_1, @@ -959,4 +968,4 @@ knitr::include_graphics("pdf/copyright.pdf") # How to insert multiple page pdfs: # https://stackoverflow.com/questions/52486342/how-to-add-a-multipage-pdf-to-rmarkdown -``` \ No newline at end of file +``` diff --git a/r/count_total_participants.R b/r/count_total_participants.R index b2b94f3..937e2f5 100644 --- a/r/count_total_participants.R +++ b/r/count_total_participants.R @@ -37,7 +37,7 @@ count_total_participants <- function() { s3_df <- load_s3_df() s3_attention_df <- s3_df |> dplyr::group_by(attention_all_correct) |> - 
dplyr::summarise(n = n()) |> + dplyr::summarise(n = dplyr::n()) |> deframe_as_list() # Sum up diff --git a/r/get_s1c_sensitivity_APA.R b/r/get_s1c_sensitivity_APA.R index 57cee8e..154e622 100644 --- a/r/get_s1c_sensitivity_APA.R +++ b/r/get_s1c_sensitivity_APA.R @@ -10,12 +10,12 @@ get_s1c_sensitivity_APA <- function(df) { df |> describe_by_factor(acceptance, vars = heart2) |> - mutate( + dplyr::mutate( msd = to_msd(heart2_mean, heart2_sd), acceptance = to_factor(acceptance) ) |> - select(acceptance, msd) |> - deframe() |> + dplyr::select(acceptance, msd) |> + tibble::deframe() |> as.list() |> append(list( t = s1c_sensitivity_mod |> describe.ttest(), diff --git a/r/load_s3_df.R b/r/load_s3_df.R index 103f70b..4d9bbaa 100644 --- a/r/load_s3_df.R +++ b/r/load_s3_df.R @@ -1,6 +1,6 @@ load_s3_df <- function() { - s3_df <- read_rds( + s3_df <- readRDS( here("data_public", "Study3_public.rds") ) %>% - filter(Finished == 1) + dplyr::filter(Finished == 1) } diff --git a/r/namifunc.R b/r/namifunc.R index 5f1d18f..bcbe914 100644 --- a/r/namifunc.R +++ b/r/namifunc.R @@ -20,7 +20,6 @@ library(ggtext) library(apastats) library(kableExtra) library(effectsize) # for equivalence tests and effect size estimation -library(tidyverse) # Project-Specific Parameters ## SESOI for ROPE in the equivalence test @@ -50,8 +49,8 @@ describe_by_factor <- function(df, ..., vars) { # vars are target variables .group <- enquos(...)
df %>% - select(!!!.group, {{ vars }}) %>% - group_by(!!!.group) %>% + dplyr::select(!!!.group, {{ vars }}) %>% + dplyr::group_by(!!!.group) %>% dplyr::summarise(across( everything(), list(mean = ~ mean(., na.rm = TRUE), sd = ~ sd(., na.rm = TRUE)) @@ -64,7 +63,7 @@ get_desc_db <- function(describe_by_factor_df, # Get descriptives table by factor and produce a query-able data frame descriptives_long <- describe_by_factor_df %>% pivot_longer(c(ends_with("mean"), ends_with("sd"))) %>% - mutate( + dplyr::mutate( variable = str_match(name, "^[^_].+(?=_)") %>% as.vector(), stat = str_match(name, "(?:[^_](?!_))+$") %>% as.vector() ) @@ -78,13 +77,13 @@ get_APA_from_msd <- function(describe_by_factor_df, descriptives_long <- describe_by_factor_df %>% pivot_longer(c(ends_with("mean"), ends_with("sd"))) %>% # Parse with underscores - mutate( + dplyr::mutate( variable = str_match(name, "^[^_].+(?=_)") %>% as.vector(), stat = str_match(name, "(?:[^_](?!_))+$") %>% as.vector() ) db <- descriptives_long %>% pivot_wider(-name, values_from = value, names_from = stat) %>% - mutate(APA = to_msd(mean %>% f.round(), sd %>% f.round())) + dplyr::mutate(APA = to_msd(mean %>% f.round(), sd %>% f.round())) return(db) } @@ -92,7 +91,7 @@ get_APA_from_msd <- function(describe_by_factor_df, summarise_descriptives <- function(df) { df %>% # create a summary row with _n, _nmean, _sd - summarise(across( +dplyr::summarise(across( where(is.numeric), list( n = ~ sum(!is.na(.x)), @@ -103,16 +102,16 @@ summarise_descriptives <- function(df) { # Pivot to the long format (1 column) pivot_longer(everything()) %>% # Label types using regex (get characters after "_") - mutate(type = str_extract(name, "[^_]*$")) %>% + dplyr::mutate(type = str_extract(name, "[^_]*$")) %>% # Repeat the scale names (since we are dealing with n, mean, sd, rep is 3) - mutate(scale = rep(names(df), each = 3)) %>% + dplyr::mutate(scale = rep(names(df), each = 3)) %>% # pivot to wider form. 
Each row is a scale, and each columns are n, mean, sd pivot_wider( names_from = type, id_cols = scale ) %>% # Mutate to number the variables (for correlation) - mutate(scale = paste0(row_number(), ". ", scale)) + dplyr::mutate(scale = paste0(dplyr::row_number(), ". ", scale)) } ## Get Bivariate Correlations with 90 and 95% CI's @@ -125,7 +124,7 @@ get_correlations <- function(target_df, conf.level2 <- 0.90 # Calculate correlations via cor.test() df <- target_df %>% - summarise(across( +dplyr::summarise(across( !!vars, ~ list( model95 = cor.test(pull(cur_data(), !!outcome), .x, conf.level = conf.level1), @@ -133,23 +132,23 @@ get_correlations <- function(target_df, ) )) model_names <- df %>% - pull(1) %>% +dplyr::pull(1) %>% names() df <- df %>% - mutate(Conf_Level = model_names) %>% + dplyr::mutate(Conf_Level = model_names) %>% pivot_longer(-Conf_Level, values_to = "model", names_to = c("label")) # Extract numbers from the calculated model using map() df <- df %>% - mutate( + dplyr::mutate( model_df = map(model, ~ broom::glance(.)), # Create an APA paragraph with APA = map_chr(model, ~ describe.r(., .add_CI = TRUE)) ) %>% # Add Confidence Intervals XX%[XX, XX] to APA string - # mutate(APA = paste0(APA, ",", to_CI_str(Conf_Level, lower = conf.low, upper = conf.high))) %>% + # dplyr::mutate(APA = paste0(APA, ",", to_CI_str(Conf_Level, lower = conf.low, upper = conf.high))) %>% unnest(model_df) %>% # name parameter to df - rename(df = "parameter") +dplyr::rename(df = "parameter") # Wider to create separate columns for 90 & 95% CIs df <- df %>% pivot_wider( @@ -164,14 +163,14 @@ get_correlations <- function(target_df, summarize_Anova_with_APA <- function(lm, .sumtype = 3) { Anova_obj <- lm %>% Anova(type = .sumtype) output_tibble <- broom::tidy(Anova_obj) %>% - mutate(APA = describe.Anova(Anova_obj)) + dplyr::mutate(APA = describe.Anova(Anova_obj)) return(output_tibble) } ## Deframe and aslist deframe_as_list <- function(x) { x %>% - deframe() %>% + tibble::deframe() 
%>% as.list() } @@ -241,7 +240,7 @@ s3_draw_mc_plot <- function(df = s3_df, y, ylab, caption = "") { df %>% # Exclude those failed attention check - filter(attention_all_correct == TRUE) %>% + dplyr::filter(attention_all_correct == TRUE) %>% ggplot(aes(x = {{ x }}, y = {{ y }}, color = {{ color }})) + # Default violin, errorbar plot default_violin + @@ -260,7 +259,7 @@ s3_draw_mc_plot <- function(df = s3_df, y, ylab, # Across-Time Plot for Study 1e s1e_plot_across_time <- function(df_long, y, ylab = "", caption = "", title = "") { df_long %>% - mutate(across(c(rejection, confederate_desire), to_factor)) %>% + dplyr::mutate(across(c(rejection, confederate_desire), to_factor)) %>% ggplot(aes( x = time, y = {{ y }}, color = paste(rejection, confederate_desire), group = paste(rejection, confederate_desire) @@ -362,16 +361,16 @@ s1_render_kable <- function(df, studykey = " ") { # get the named list of packing # Filter so that only the label rows will be returned (intervals will be the length of packing) pack_named_index <- df %>% - filter(!is.na(intervals)) %>% - select(labels, intervals) %>% - deframe() + dplyr::filter(!is.na(intervals)) %>% + dplyr::select(labels, intervals) %>% + tibble::deframe() # Get the names of the named list - pack_named_index_names <- names(pack_named_index) %>% str_trim() + pack_named_index_names <- names(pack_named_index) %>% stringr::str_trim() # Check if Needs packing needs_packing <- (sum(pack_named_index_names == "") != length(pack_named_index_names)) # Create Kable returned_kable <- df %>% - select(Measure, Time, Construct, Validity, Citation) %>% + dplyr::select(Measure, Time, Construct, Validity, Citation) %>% kbl( caption = paste0("Summary of Measures for Study", " 1", studykey), booktabs = TRUE @@ -439,7 +438,7 @@ describe_chi_htest <- function(x) { ## Describe rstatix::anova_test() describe_anova_test <- function(x) { x %>% - mutate(APA = sprintf( + dplyr::mutate(APA = sprintf( "_F_(%s, %s) = %s, _p_ %s, $\\eta^2_{G}$ %s", DFn, 
DFd, F %>% f.round(), p %>% round.p(), @@ -503,11 +502,11 @@ get_reliability_output <- function(reliability) { add_eqtest <- function(lm_df) { outdf <- lm_df %>% # use ci = .95 for 95% overall confidence (calculate 90% CI for estimates) - mutate(eq_test = map(model, ~ equivalence_test(., range = ROPE_r, ci = .95, rule = "classic"))) %>% + dplyr::mutate(eq_test = map(model, ~ equivalence_test(., range = ROPE_r, ci = .95, rule = "classic"))) %>% # Extract the results for the predictor - mutate(eq_pred = map_chr(eq_test, ~ .$ROPE_Equivalence[[2]])) %>% + dplyr::mutate(eq_pred = map_chr(eq_test, ~ .$ROPE_Equivalence[[2]])) %>% # Add labels for plot annotation - mutate(eq_label = case_when( + dplyr::mutate(eq_label = dplyr::case_when( eq_pred == "Rejected" ~ "", eq_pred == "Accepted" ~ "*", # Unicode @@ -537,16 +536,16 @@ evaluate_equivalence <- function(lower, upper, ROPE) { # Get n and percent by specified group and convert it to list for formatting get_n_pct <- function(df, ...) { df %>% - group_by(...) %>% - summarise(n = n()) %>% - ungroup() %>% - mutate(pct = round(n / sum(n) * 100, 2)) %>% + dplyr::group_by(...) %>% +dplyr::summarise(n =dplyr::n()) %>% + dplyr::ungroup() %>% + dplyr::mutate(pct = round(n / sum(n) * 100, 2)) %>% pivot_longer(cols = c(n, pct)) %>% - nest_by(...) %>% - ungroup() %>% # Ungroup to avoid cannot be recycled one error - mutate(data = map(data, function(x) x %>% deframe_as_list())) %>% + dplyr::nest_by(...) %>% + dplyr::ungroup() %>% # Ungroup to avoid cannot be recycled one error + dplyr::mutate(data = map(data, function(x) x %>% deframe_as_list())) %>% unite("label", ...) 
%>% - select(label, data) %>% + dplyr::select(label, data) %>% deframe_as_list() } @@ -564,11 +563,11 @@ plot_s2_moderation <- function(model) { # Labels function s2mod_relabel <- function(x) { x %>% - str_replace("parasocial_MC_group2-1", "Group [2-1]") %>% - str_replace("parasocial_MC_group3-2", "Group [3-2]") %>% - str_replace(cur_mod, cur_mod_label) %>% - str_replace("essay_condition1", "Essay [Surrogacy]") %>% - str_replace("Time1", "Time [1]") %>% + stringr::str_replace("parasocial_MC_group2-1", "Group [2-1]") %>% + stringr::str_replace("parasocial_MC_group3-2", "Group [3-2]") %>% + stringr::str_replace(cur_mod, cur_mod_label) %>% + stringr::str_replace("essay_condition1", "Essay [Surrogacy]") %>% + stringr::str_replace("Time1", "Time [1]") %>% str_replace_all(":", " x ") } @@ -628,12 +627,12 @@ plot_s3_mod_bymc <- function(model) { # Function to relabel the variabels s3mod_relabel <- function(x) { x %>% - str_replace("parasocial_MC_group1", "Group [1]") %>% - str_replace("parasocial_MC_group2", "Group [2]") %>% - str_replace(current_moderator, current_moderator_label) %>% - str_replace("social_world1", "Social World [High]") %>% - str_replace("parasocial1", "Parasocial [High]") %>% - str_replace("time1", "Time [1]") %>% + stringr::str_replace("parasocial_MC_group1", "Group [1]") %>% + stringr::str_replace("parasocial_MC_group2", "Group [2]") %>% + stringr::str_replace(current_moderator, current_moderator_label) %>% + stringr::str_replace("social_world1", "Social World [High]") %>% + stringr::str_replace("parasocial1", "Parasocial [High]") %>% + stringr::str_replace("time1", "Time [1]") %>% str_replace_all(":", " x ") } @@ -750,18 +749,18 @@ get_APA_from_emm <- function(emm) { stats_df <- emm %>% as_tibble() %>% - select(ends_with(".trend"):last_col()) + dplyr::select(ends_with(".trend"):last_col()) labels_df <- emm %>% as_tibble() %>% - select(1:ends_with(".trend")) %>% - select(-last_col()) + dplyr::select(1:ends_with(".trend")) %>% + 
dplyr::select(-last_col()) # Combine the labels as the first row labels_df <- labels_df %>% unite(col = "label") # Stats df stats_df <- stats_df %>% - mutate(APA = get_slope_APA( + dplyr::mutate(APA = get_slope_APA( b = .[[1]], SE = SE, ci.lower = lower.CL, ci.upper = upper.CL @@ -773,6 +772,6 @@ get_APA_from_emm <- function(emm) { # Deframe as list APA_output <- combined_df %>% - select(1, APA) %>% + dplyr::select(1, APA) %>% deframe_as_list() } diff --git a/r/s2_count_participants_by_attention_check.R b/r/s2_count_participants_by_attention_check.R index f41bf08..09962b3 100644 --- a/r/s2_count_participants_by_attention_check.R +++ b/r/s2_count_participants_by_attention_check.R @@ -1,7 +1,7 @@ # Total number of participants by attention check s2_count_participants_by_attention_check <- function(df) { df %>% - summarise( +dplyr::summarise( across( .cols = c( attention_rejection_correct, diff --git a/renv.lock b/renv.lock index 9fa7491..152c020 100644 --- a/renv.lock +++ b/renv.lock @@ -9,17 +9,6 @@ ] }, "Packages": { - "DBI": { - "Package": "DBI", - "Version": "1.2.3", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "methods" - ], - "Hash": "065ae649b05f1ff66bb0c793107508f5" - }, "Deriv": { "Package": "Deriv", "Version": "4.1.6", @@ -332,7 +321,7 @@ "stringr", "utils" ], - "Hash": "3ce2e7b6bac4445352c2fe84e1d23c1e" + "Hash": "b48b78d2d1540318631dc2c935621928" }, "askpass": { "Package": "askpass", @@ -417,18 +406,6 @@ ], "Hash": "4f572fbc586294afff277db583b9060f" }, - "blob": { - "Package": "blob", - "Version": "1.2.4", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "methods", - "rlang", - "vctrs" - ], - "Hash": "40415719b5a479b87949f3aa0aee737c" - }, "bookdown": { "Package": "bookdown", "Version": "0.42", @@ -541,19 +518,6 @@ ], "Hash": "cd9a672193789068eb5a2aad65a0dedf" }, - "callr": { - "Package": "callr", - "Version": "3.7.6", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - 
"R6", - "processx", - "utils" - ], - "Hash": "d7e13f49c19103ece9e58ad2d83a7354" - }, "car": { "Package": "car", "Version": "3.1-3", @@ -724,19 +688,6 @@ "Repository": "RSPM", "Hash": "14eb0596f987c71535d07c3aff814742" }, - "conflicted": { - "Package": "conflicted", - "Version": "1.2.0", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cli", - "memoise", - "rlang" - ], - "Hash": "bb097fccb22d156624fd07cd2894ddb6" - }, "corrplot": { "Package": "corrplot", "Version": "0.95", @@ -801,20 +752,6 @@ ], "Hash": "859d96e65ef198fd43e82b9628d593ef" }, - "credentials": { - "Package": "credentials", - "Version": "2.0.2", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "askpass", - "curl", - "jsonlite", - "openssl", - "sys" - ], - "Hash": "09fd631e607a236f8cc7f9604db32cb8" - }, "crosstalk": { "Package": "crosstalk", "Version": "1.2.1", @@ -862,47 +799,6 @@ ], "Hash": "8a37ad13b9b0db70bf32d6eb2fc0a2aa" }, - "dbplyr": { - "Package": "dbplyr", - "Version": "2.5.0", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "DBI", - "R", - "R6", - "blob", - "cli", - "dplyr", - "glue", - "lifecycle", - "magrittr", - "methods", - "pillar", - "purrr", - "rlang", - "tibble", - "tidyr", - "tidyselect", - "utils", - "vctrs", - "withr" - ], - "Hash": "39b2e002522bfd258039ee4e889e0fd1" - }, - "desc": { - "Package": "desc", - "Version": "1.4.3", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R", - "R6", - "cli", - "utils" - ], - "Hash": "99b79fcbd6c4d1ce087f5c5c758b384f" - }, "digest": { "Package": "digest", "Version": "0.6.37", @@ -961,25 +857,6 @@ ], "Hash": "fedd9d00c2944ff00a0e2696ccf048ec" }, - "dtplyr": { - "Package": "dtplyr", - "Version": "1.3.1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cli", - "data.table", - "dplyr", - "glue", - "lifecycle", - "rlang", - "tibble", - "tidyselect", - "vctrs" - ], - "Hash": "54ed3ea01b11e81a86544faaecfef8e2" - }, 
"effectsize": { "Package": "effectsize", "Version": "1.0.0", @@ -1177,28 +1054,6 @@ ], "Hash": "475771e3edb711591476be387c9a8c2e" }, - "gargle": { - "Package": "gargle", - "Version": "1.5.2", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cli", - "fs", - "glue", - "httr", - "jsonlite", - "lifecycle", - "openssl", - "rappdirs", - "rlang", - "stats", - "utils", - "withr" - ], - "Hash": "fc0b272e5847c58cd5da9b20eedbd026" - }, "gclus": { "Package": "gclus", "Version": "1.3.2", @@ -1221,21 +1076,6 @@ ], "Hash": "15e9634c0fcd294799e9b2e929ed1b86" }, - "gert": { - "Package": "gert", - "Version": "2.1.4", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "askpass", - "credentials", - "openssl", - "rstudioapi", - "sys", - "zip" - ], - "Hash": "ae855ad6d7be20dd7b05d43d25700398" - }, "ggeffects": { "Package": "ggeffects", "Version": "2.1.0", @@ -1419,34 +1259,6 @@ ], "Hash": "c5ba8f5056487403a299b91984be86ca" }, - "gh": { - "Package": "gh", - "Version": "1.4.1", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R", - "cli", - "gitcreds", - "glue", - "httr2", - "ini", - "jsonlite", - "lifecycle", - "rlang" - ], - "Hash": "fbbbc48eba7a6626a08bb365e44b563b" - }, - "gitcreds": { - "Package": "gitcreds", - "Version": "0.1.2", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R" - ], - "Hash": "ab08ac61f3e1be454ae21911eb8bc2fe" - }, "globals": { "Package": "globals", "Version": "0.16.3", @@ -1469,59 +1281,6 @@ ], "Hash": "5899f1eaa825580172bb56c08266f37c" }, - "googledrive": { - "Package": "googledrive", - "Version": "2.1.1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cli", - "gargle", - "glue", - "httr", - "jsonlite", - "lifecycle", - "magrittr", - "pillar", - "purrr", - "rlang", - "tibble", - "utils", - "uuid", - "vctrs", - "withr" - ], - "Hash": "e99641edef03e2a5e87f0a0b1fcc97f4" - }, - "googlesheets4": { - "Package": "googlesheets4", - 
"Version": "1.1.1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cellranger", - "cli", - "curl", - "gargle", - "glue", - "googledrive", - "httr", - "ids", - "lifecycle", - "magrittr", - "methods", - "purrr", - "rematch2", - "rlang", - "tibble", - "utils", - "vctrs", - "withr" - ], - "Hash": "d6db1667059d027da730decdc214b959" - }, "gridExtra": { "Package": "gridExtra", "Version": "2.3", @@ -1692,45 +1451,6 @@ ], "Hash": "ac107251d9d9fd72f0ca8049988f1d7f" }, - "httr2": { - "Package": "httr2", - "Version": "1.1.0", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R", - "R6", - "cli", - "curl", - "glue", - "lifecycle", - "magrittr", - "openssl", - "rappdirs", - "rlang", - "vctrs", - "withr" - ], - "Hash": "0f14199bbd820a9fca398f2df40994f1" - }, - "ids": { - "Package": "ids", - "Version": "1.0.1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "openssl", - "uuid" - ], - "Hash": "99df65cfef20e525ed38c3d2577f7190" - }, - "ini": { - "Package": "ini", - "Version": "0.3.1", - "Source": "Repository", - "Repository": "CRAN", - "Hash": "6154ec2223172bce8162d4153cda21f7" - }, "insight": { "Package": "insight", "Version": "1.0.1", @@ -2405,17 +2125,6 @@ ], "Hash": "ceb5c2a59ba33d42d051285a3e8a5118" }, - "prereg": { - "Package": "prereg", - "Version": "0.6.0", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R", - "rmarkdown" - ], - "Hash": "067f9c808e9beed351ceb96dc09b60f0" - }, "prettyunits": { "Package": "prettyunits", "Version": "1.2.0", @@ -2426,19 +2135,6 @@ ], "Hash": "6b01fc98b1e86c4f705ce9dcfd2f57c7" }, - "processx": { - "Package": "processx", - "Version": "3.8.5", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "R6", - "ps", - "utils" - ], - "Hash": "790b1edafbd9980aeb8c3d77e3b0ba33" - }, "productplots": { "Package": "productplots", "Version": "0.1.1", @@ -2480,17 +2176,6 @@ ], "Hash": "c84fd4f75ea1f5434735e08b7f50fbca" }, - "ps": { - 
"Package": "ps", - "Version": "1.8.1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "utils" - ], - "Hash": "b4404b1de13758dea1c0484ad0d48563" - }, "psych": { "Package": "psych", "Version": "2.4.12", @@ -2549,30 +2234,6 @@ ], "Hash": "c48844cd7961de506a1b4d22b2e082c7" }, - "ragg": { - "Package": "ragg", - "Version": "1.3.3", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "systemfonts", - "textshaping" - ], - "Hash": "0595fe5e47357111f29ad19101c7d271" - }, - "ranger": { - "Package": "ranger", - "Version": "0.17.0", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "Matrix", - "R", - "Rcpp", - "RcppEigen" - ], - "Hash": "9fe0f505fe36cac3207e0790d21b3676" - }, "rappdirs": { "Package": "rappdirs", "Version": "0.3.3", @@ -2664,16 +2325,6 @@ "Repository": "RSPM", "Hash": "cbff1b666c6fa6d21202f07e2318d4f1" }, - "rematch2": { - "Package": "rematch2", - "Version": "2.1.2", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "tibble" - ], - "Hash": "76c9e04c712a05848ae7a23d2f170a40" - }, "renv": { "Package": "renv", "Version": "1.0.11", @@ -2700,28 +2351,6 @@ ], "Hash": "1393acc49816f4fe143d87fb33e75631" }, - "reprex": { - "Package": "reprex", - "Version": "2.1.1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "callr", - "cli", - "clipr", - "fs", - "glue", - "knitr", - "lifecycle", - "rlang", - "rmarkdown", - "rstudioapi", - "utils", - "withr" - ], - "Hash": "97b1d5361a24d9fb588db7afe3e5bcbf" - }, "reshape2": { "Package": "reshape2", "Version": "1.4.4", @@ -2860,41 +2489,6 @@ "Repository": "RSPM", "Hash": "5f90cd73946d706cfe26024294236113" }, - "rticles": { - "Package": "rticles", - "Version": "0.27", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "knitr", - "lifecycle", - "rmarkdown", - "tinytex", - "utils", - "xfun", - "yaml" - ], - "Hash": "1a05d2cc06f9fb360d2dc15f99293dfe" - }, - "rvest": { - "Package": "rvest", - 
"Version": "1.0.4", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cli", - "glue", - "httr", - "lifecycle", - "magrittr", - "rlang", - "selectr", - "tibble", - "xml2" - ], - "Hash": "0bcf0c6f274e90ea314b812a6d19a519" - }, "sass": { "Package": "sass", "Version": "0.4.9", @@ -2929,19 +2523,6 @@ ], "Hash": "c19df082ba346b0ffa6f833e92de34d1" }, - "selectr": { - "Package": "selectr", - "Version": "0.4-2", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "R6", - "methods", - "stringr" - ], - "Hash": "3838071b66e0c566d55cc26bd6e27bf4" - }, "seriation": { "Package": "seriation", "Version": "1.5.7", @@ -3154,22 +2735,6 @@ ], "Hash": "f8b2924480a2679e2bad9750646112fe" }, - "textshaping": { - "Package": "textshaping", - "Version": "1.0.0", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "cpp11", - "lifecycle", - "stats", - "stringi", - "systemfonts", - "utils" - ], - "Hash": "5d44adc8145c718066b0bc374d142ca1" - }, "tibble": { "Package": "tibble", "Version": "3.2.1", @@ -3251,46 +2816,6 @@ ], "Hash": "612125521ebc22fd028182761211b5d9" }, - "tidyverse": { - "Package": "tidyverse", - "Version": "2.0.0", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R", - "broom", - "cli", - "conflicted", - "dbplyr", - "dplyr", - "dtplyr", - "forcats", - "ggplot2", - "googledrive", - "googlesheets4", - "haven", - "hms", - "httr", - "jsonlite", - "lubridate", - "magrittr", - "modelr", - "pillar", - "purrr", - "ragg", - "readr", - "readxl", - "reprex", - "rlang", - "rstudioapi", - "rvest", - "stringr", - "tibble", - "tidyr", - "xml2" - ], - "Hash": "c328568cd14ea89a83bd4ca7f54ae07e" - }, "timechange": { "Package": "timechange", "Version": "0.3.0", @@ -3336,38 +2861,6 @@ ], "Hash": "f561504ec2897f4d46f0c7657e488ae1" }, - "usethis": { - "Package": "usethis", - "Version": "3.1.0", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "R", - "cli", - "clipr", - 
"crayon", - "curl", - "desc", - "fs", - "gert", - "gh", - "glue", - "jsonlite", - "lifecycle", - "purrr", - "rappdirs", - "rlang", - "rprojroot", - "rstudioapi", - "stats", - "tools", - "utils", - "whisker", - "withr", - "yaml" - ], - "Hash": "0d7f5ca181f9b1e68b217bd93b6cc703" - }, "utf8": { "Package": "utf8", "Version": "1.2.4", @@ -3378,16 +2871,6 @@ ], "Hash": "62b65c52671e6665f803ff02954446e9" }, - "uuid": { - "Package": "uuid", - "Version": "1.2-1", - "Source": "Repository", - "Repository": "RSPM", - "Requirements": [ - "R" - ], - "Hash": "34e965e62a41fcafb1ca60e9b142085b" - }, "vctrs": { "Package": "vctrs", "Version": "0.6.5", @@ -3466,13 +2949,6 @@ ], "Hash": "390f9315bc0025be03012054103d227c" }, - "whisker": { - "Package": "whisker", - "Version": "0.4.1", - "Source": "Repository", - "Repository": "CRAN", - "Hash": "c6abfa47a46d281a7d5159d0a8891e88" - }, "withr": { "Package": "withr", "Version": "3.0.2", @@ -3485,31 +2961,6 @@ ], "Hash": "cc2d62c76458d425210d1eb1478b30b4" }, - "worcs": { - "Package": "worcs", - "Version": "0.1.17", - "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "cli", - "credentials", - "digest", - "gert", - "gh", - "methods", - "prereg", - "ranger", - "renv", - "rlang", - "rmarkdown", - "rticles", - "tinytex", - "usethis", - "xfun", - "yaml" - ], - "Hash": "e02d083bcdf7123d12d5225f9eb71e7d" - }, "wordcloud": { "Package": "wordcloud", "Version": "2.6", @@ -3573,13 +3024,6 @@ "Source": "Repository", "Repository": "RSPM", "Hash": "51dab85c6c98e50a18d7551e9d49f76c" - }, - "zip": { - "Package": "zip", - "Version": "2.3.1", - "Source": "Repository", - "Repository": "CRAN", - "Hash": "fcc4bd8e6da2d2011eb64a5e5cc685ab" } } }