Brandon Stewart committed on
Commit
649d4d3
·
0 Parent(s):

Version 1.0

Browse files
.codeocean/environment.json ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": 1,
3
+ "base_image": "registry.codeocean.com/codeocean/py-r:python3.10.12-R4.3.2-JupyterLab4.0.10-RStudiorstudio-server-2023.12.0-369-ubuntu22.04",
4
+ "options": {},
5
+ "installers": {
6
+ "pip": {
7
+ "packages": [
8
+ {
9
+ "name": "matplotlib",
10
+ "version": "3.10.0"
11
+ },
12
+ {
13
+ "name": "numpy",
14
+ "version": "1.26.4"
15
+ },
16
+ {
17
+ "name": "pandas",
18
+ "version": "2.2.3"
19
+ },
20
+ {
21
+ "name": "rpy2",
22
+ "version": "3.5.17"
23
+ },
24
+ {
25
+ "name": "seaborn"
26
+ },
27
+ {
28
+ "name": "stargazer",
29
+ "version": "0.0.7"
30
+ }
31
+ ],
32
+ "options": {},
33
+ "pre_install_options": {}
34
+ },
35
+ "rcran": {
36
+ "packages": [
37
+ {
38
+ "name": "car",
39
+ "version": "3.1-3"
40
+ },
41
+ {
42
+ "name": "corrplot",
43
+ "version": "0.95"
44
+ },
45
+ {
46
+ "name": "covr",
47
+ "version": "3.6.4"
48
+ },
49
+ {
50
+ "name": "doParallel",
51
+ "version": "1.0.17"
52
+ },
53
+ {
54
+ "name": "fastDummies",
55
+ "version": "1.7.5"
56
+ },
57
+ {
58
+ "name": "feather",
59
+ "version": "0.3.5"
60
+ },
61
+ {
62
+ "name": "ggtext",
63
+ "version": "0.1.2"
64
+ },
65
+ {
66
+ "name": "janitor",
67
+ "version": "2.2.1"
68
+ },
69
+ {
70
+ "name": "lubridate",
71
+ "version": "1.9.4"
72
+ },
73
+ {
74
+ "name": "mockr",
75
+ "version": "0.2.1"
76
+ },
77
+ {
78
+ "name": "psych",
79
+ "version": "2.4.12"
80
+ },
81
+ {
82
+ "name": "randomizr",
83
+ "version": "1.0.0"
84
+ },
85
+ {
86
+ "name": "sandwich",
87
+ "version": "3.1-1"
88
+ },
89
+ {
90
+ "name": "stargazer",
91
+ "version": "5.2.3"
92
+ },
93
+ {
94
+ "name": "systemfonts",
95
+ "version": "1.2.1"
96
+ },
97
+ {
98
+ "name": "textshaping",
99
+ "version": "1.0.0"
100
+ },
101
+ {
102
+ "name": "tidyverse",
103
+ "version": "2.0.0"
104
+ }
105
+ ]
106
+ },
107
+ "vscode": {
108
+ "packages": [
109
+ {
110
+ "name": "REditorSupport.R"
111
+ },
112
+ {
113
+ "name": "continue.continue"
114
+ },
115
+ {
116
+ "name": "ms-python.python"
117
+ },
118
+ {
119
+ "name": "ms-toolsai.jupyter"
120
+ },
121
+ {
122
+ "name": "reageyao.bioSyntax"
123
+ },
124
+ {
125
+ "name": "saoudrizwan.claude-dev"
126
+ }
127
+ ],
128
+ "version": "4.95.3"
129
+ }
130
+ }
131
+ }
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ /data/
2
+ .Rproj.user
3
+
4
+ .vscode
code/04_postprocessing_exploration_issues12.R ADDED
@@ -0,0 +1,972 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
## Banner marking the start of this script's console output in combined logs.
## (Output is byte-identical to the original cat(rep('=', 80), ...) form.)
cat(paste0(strrep('=', 80),
           '\n\n',
           'OUTPUT FROM: 04_postprocessing_exploration_issues12.R',
           '\n\n'))

## YouTube Algorithms and Minimum Wage Opinions
## Data collected May-June 2022 via MTurk/CloudResearch

## Preamble ----------------------------
library(tidyverse)
library(janitor)
library(lubridate)
library(stargazer)
library(broom)
library(patchwork)

# plotting w/ custom colors (optional)
red_mit    <- '#A31F34'
red_light  <- '#A9606C'
blue_mit   <- '#315485'
grey_light <- '#C2C0BF'
grey_dark  <- '#8A8B8C'
black      <- '#353132'
vpurple    <- "#440154FF"
vyellow    <- "#FDE725FF"
vgreen     <- "#21908CFF"
30
## edited 13 june 2024 at request of reviewers ---------------------------------

## Load the per-study "understanding" estimates (base-vs-control contrasts)
## and relabel the treatment-contrast strings onto a shared lib/con + 31/22
## vocabulary so all three studies can be plotted together.
## NOTE(review): the pro/anti -> lib/con mapping and the ordering of the
## neutral-seed contrasts differ between the gun-control and minimum-wage
## recodes below — presumably because "pro" is conservative for gun control
## but liberal for minimum wage; confirm against the upstream scripts that
## wrote these CSVs.

## Study 1: gun control (MTurk).
understanding_1 <-
  read_csv('../results/intermediate data/gun control (issue 1)/guncontrol_understanding_basecontrol_pretty.csv') %>%
  mutate(
    layer2_treatmentcontrast = recode(
      layer2_treatmentcontrast,
      "31 pro - 22 pro" = "con 31 - con 22",
      "anti 31 - anti 22" = "lib 31 - lib 22",
      "31 neutral anti - 22 neutral anti" = "neutral lib 31 - neutral lib 22",
      "22 neutral pro - 22 neutral anti" = "neutral con 22 - neutral lib 22",
      "31 neutral pro - 31 neutral anti" = "neutral con 31 - neutral lib 31",
      "31 neutral pro - 22 neutral pro" = "neutral con 31 - neutral con 22"
    )
  )


## Study 2: minimum wage (MTurk).
understanding_2 <-
  read_csv('../results/intermediate data/minimum wage (issue 2)/understanding_basecontrol_pretty.csv')
understanding_2 <- understanding_2 %>%
  mutate(
    layer2_treatmentcontrast = recode(
      layer2_treatmentcontrast,
      "31 pro - 22 pro" = "con 31 - con 22",
      "anti 31 - anti 22" = "lib 31 - lib 22",
      "31 neutral anti - 22 neutral anti" = "neutral lib 31 - neutral lib 22",
      "22 neutral anti - 22 neutral pro" = "neutral con 22 - neutral lib 22",
      "31 neutral anti - 31 neutral pro" = "neutral con 31 - neutral lib 31",
      "31 neutral pro - 22 neutral pro" = "neutral con 31 - neutral con 22"
    )
  )


## Study 3: minimum wage (YouGov replication) — same recode table as study 2.
understanding_3 <- read_csv('../results/intermediate data/minimum wage (issue 2)/understanding_basecontrol_pretty_yg.csv')
understanding_3 <- understanding_3 %>%
  mutate(
    layer2_treatmentcontrast = recode(
      layer2_treatmentcontrast,
      "31 pro - 22 pro" = "con 31 - con 22",
      "anti 31 - anti 22" = "lib 31 - lib 22",
      "31 neutral anti - 22 neutral anti" = "neutral lib 31 - neutral lib 22",
      "22 neutral anti - 22 neutral pro" = "neutral con 22 - neutral lib 22",
      "31 neutral anti - 31 neutral pro" = "neutral con 31 - neutral lib 31",
      "31 neutral pro - 22 neutral pro" = "neutral con 31 - neutral con 22"
    )
  )
76
+
77
## Stack the three studies into one frame with a Study factor. Levels run 3:1
## so that dodge/legend order is YouGov, MTurk-MW, MTurk-guns.
understanding_1$Study <- 1
understanding_2$Study <- 2
understanding_3$Study <- 3

understanding <- rbind(understanding_1,
                       understanding_2,
                       understanding_3
)
understanding$Study <- factor(understanding$Study,
                              levels = 3:1,
                              labels = c('Minimum Wage\n(YouGov)',
                                         'Minimum Wage\n(MTurk)',
                                         'Gun Control\n(MTurk)'
                              )
)

## Collapse the issue-specific outcome codes onto two shared facet labels
## ("Question 1" / "Question 2") so gun-control and minimum-wage rows share
## panels in the combined figure.
understanding <- understanding %>%
  mutate(outcome =
           recode(layer3_specificoutcome,
                  'right_to_own_importance_w2' = 'Question 1:\nRight to own more important than regulation (Gun Control)\nRestricts business freedom to set policy (Minimum Wage)',
                  'concealed_safe_w2' = 'Question 2:\nMore concealed carry makes US safer (Gun Control)\nRaising hurts low-income workers (Minimum Wage)',
                  'mw_restrict_w2' = 'Question 1:\nRight to own more important than regulation (Gun Control)\nRestricts business freedom to set policy (Minimum Wage)',
                  'mw_help_w2' = 'Question 2:\nMore concealed carry makes US safer (Gun Control)\nRaising hurts low-income workers (Minimum Wage)'
           )
  )

## Normal-approximation confidence intervals from est and se.
## NOTE(review): qnorm(0.001)/qnorm(0.999) gives a 99.8% interval, not 99% —
## confirm whether the "_99" naming is intentional (these bounds are not used
## in the plots below; only the 95% and 90% bounds are).
understanding <- understanding %>%
  mutate(ci_lo_99 = est + qnorm(0.001)*se,
         ci_hi_99 = est + qnorm(0.999)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se
  )

## Tag each contrast: 'seed' = conservative vs. liberal seed at a fixed
## algorithm; 'algorithm' = 3/1 vs. 2/2 algorithm at a fixed seed.
understanding <- understanding %>%
  mutate(
    contrast = ifelse(
      layer2_treatmentcontrast %in% c("neutral con 31 - neutral lib 31",
                                      "neutral con 22 - neutral lib 22"
      ),
      yes = 'seed',
      no = 'algorithm'
    )
  )

## Ordered factor controlling axis ordering and human-readable labels.
understanding$layer2_treatmentcontrast <- factor(
  understanding$layer2_treatmentcontrast,
  levels = c('lib 31 - lib 22',
             'neutral lib 31 - neutral lib 22',
             'neutral con 31 - neutral con 22',
             'con 31 - con 22',
             'neutral con 31 - neutral lib 31',
             'neutral con 22 - neutral lib 22'
  ),
  labels = c('Liberal respondents,\nliberal seed',
             'Moderate respondents,\nliberal seed',
             'Moderate respondents,\nconservative seed',
             'Conservative respondents,\nconservative seed',
             'Moderate respondents,\n3/1 algorithm',
             'Moderate respondents,\n2/2 algorithm'
  ),
  ordered = TRUE
)
141
+
142
## Algorithm contrasts (3/1 vs. 2/2), faceted by survey question. Points are
## dodged by study; color flags p.adj < 0.05 (blue = significant). Thin bars
## are 95% CIs, thick bars 90% CIs.
understanding_plot_algo <- ggplot(
  understanding %>% filter(contrast == 'algorithm'),
  aes(x = layer2_treatmentcontrast,
      group = Study,
      color = p.adj < 0.05
  )
) +
  geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95),
                position=position_dodge(width=0.5),
                width=0,
                lwd=0.5
  ) +
  geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90),
                position=position_dodge(width=0.5),
                width=0,
                lwd=1
  ) +
  geom_point(aes(y=est,shape=Study),
             position=position_dodge(width=0.5),
             size=2
  ) +
  geom_hline(yintercept = 0,lty=2) +
  facet_wrap( ~ outcome,scales="free") +
  scale_color_manual(breaks=c(F,T),values = c("black","blue"),guide="none") +
  # flip so contrasts run down the vertical axis; zoom effects to [-0.1, 0.2]
  coord_flip(ylim=c(-0.1,0.2)) +
  theme_bw(base_family = "sans") +
  theme(strip.background = element_rect(fill="white"),legend.position = "none") +
  ylab('Treatment effect of 3/1 vs. 2/2 algorithm (95% and 90% CIs)') +
  xlab(NULL)
# print to the active device (console log shows the plot was built)
understanding_plot_algo
172
+
173
+
174
## Seed contrasts (conservative vs. liberal seed video): same visual encoding
## as the algorithm panel, but with the study legend shown at the bottom.
understanding_plot_seed <- ggplot(
  understanding %>% filter(contrast == 'seed'),
  aes(x = layer2_treatmentcontrast,
      group = Study,
      color = p.adj < 0.05
  )
) +
  geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95),
                position=position_dodge(width=0.5),
                width=0,
                lwd=0.5
  ) +
  geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90),
                position=position_dodge(width=0.5),
                width=0,
                lwd=1
  ) +
  geom_point(aes(y=est,shape=Study),
             position=position_dodge(width=0.5),
             size=2
  ) +
  geom_hline(yintercept = 0,lty=2) +
  facet_wrap(~ outcome,scales="free") +
  scale_color_manual(breaks=c(F,T),values = c("black","blue"),guide="none") +
  coord_flip(ylim=c(-0.1,0.2)) +
  theme_bw(base_family = "sans") +
  # negative left legend margin pulls the legend toward the panel edge
  theme(strip.background = element_rect(fill="white"),legend.position = "bottom",legend.margin = margin(0,0,0,-3,"lines")) +
  ylab('Treatment effect of conservative seed vs. liberal seed video (95% and 90% CIs)') +
  xlab(NULL)

## Stack the algorithm panel over the seed panel (patchwork); the algorithm
## panel gets twice the height.
understanding_plot <- (understanding_plot_algo / understanding_plot_seed) +
  plot_layout(heights = c(2, 1))

ggsave(understanding_plot,
       filename = "../results/understanding_3studies.png",width=12,height=8.5)
209
+
210
## Base-control Figures ----------------------------------------------------

## Load adjusted-p coefficient tables for the three studies and put the
## treatment contrasts on the shared lib/con vocabulary.
## NOTE(review): for gun control, est is sign-flipped for pro_fraction_chosen
## (so larger values presumably mean more *liberal* videos chosen, matching
## the outcome label used later) and "pro"/"anti" map to con/lib, while for
## minimum wage they map to lib/con — confirm against the upstream
## estimation scripts.
coefs_basecontrol_guns <- read_csv("../results/intermediate data/gun control (issue 1)/guncontrol_padj_basecontrol_pretty.csv") %>%
  mutate(est = case_when(layer3_specificoutcome=="pro_fraction_chosen" ~ -1*est,
                         layer3_specificoutcome!="pro_fraction_chosen" ~ est),
         layer2_treatmentcontrast = dplyr::recode(layer2_treatmentcontrast,
                                                  "pro 31 - pro 22"="con 31 - con 22",
                                                  "anti 31 - anti 22"="lib 31 - lib 22",
                                                  "neutral anti 31 - neutral anti 22"="neutral lib 31 - neutral lib 22",
                                                  "neutral pro 22 - neutral anti 22"="neutral con 22 - neutral lib 22",
                                                  "neutral pro 31 - neutral anti 31"="neutral con 31 - neutral lib 31",
                                                  "neutral pro 31 - neutral pro 22"="neutral con 31 - neutral con 22"
         ))
coefs_basecontrol <- read_csv("../results/intermediate data/minimum wage (issue 2)/padj_basecontrol_pretty.csv") %>%
  mutate(layer2_treatmentcontrast = dplyr::recode(layer2_treatmentcontrast,
                                                  "pro 31 - pro 22"="lib 31 - lib 22",
                                                  "anti 31 - anti 22"="con 31 - con 22",
                                                  "neutral anti 31 - neutral anti 22"="neutral con 31 - neutral con 22",
                                                  "neutral anti 22 - neutral pro 22"="neutral con 22 - neutral lib 22",
                                                  "neutral anti 31 - neutral pro 31"="neutral con 31 - neutral lib 31",
                                                  "neutral pro 31 - neutral pro 22"="neutral lib 31 - neutral lib 22"
  ))
coefs_basecontrol_yg <- read_csv("../results/intermediate data/minimum wage (issue 2)/padj_basecontrol_pretty_yg.csv") %>%
  mutate(layer2_treatmentcontrast = dplyr::recode(layer2_treatmentcontrast,
                                                  "pro 31 - pro 22"="lib 31 - lib 22",
                                                  "anti 31 - anti 22"="con 31 - con 22",
                                                  "neutral anti 31 - neutral anti 22"="neutral con 31 - neutral con 22",
                                                  "neutral anti 22 - neutral pro 22"="neutral con 22 - neutral lib 22",
                                                  "neutral anti 31 - neutral pro 31"="neutral con 31 - neutral lib 31",
                                                  "neutral pro 31 - neutral pro 22"="neutral lib 31 - neutral lib 22"
  ))
## Combine the three studies (overwriting coefs_basecontrol) with a Sample
## factor ordered YouGov < MTurk-MW < MTurk-guns, and unify the
## issue-specific policy family/outcome names onto "policy"/"policyindex".
coefs_basecontrol <- bind_rows(mutate(coefs_basecontrol_guns,Sample="Gun Control\n(MTurk)"),
                               mutate(coefs_basecontrol,Sample="Minimum Wage\n(MTurk)"),
                               mutate(coefs_basecontrol_yg,Sample="Minimum Wage\n(YouGov)")) %>%
  mutate(Sample = factor(Sample,levels=c("Minimum Wage\n(YouGov)","Minimum Wage\n(MTurk)","Gun Control\n(MTurk)"),ordered=T)) %>%
  mutate(layer1_hypothesisfamily = recode(layer1_hypothesisfamily,
                                          "gunpolicy"="policy",
                                          "mwpolicy"="policy"),
         layer3_specificoutcome = recode(layer3_specificoutcome,
                                         "gun_index_w2"="policyindex",
                                         "mw_index_w2"="policyindex"))
251
+
252
# look at significant effects:
# Non-"neutral" (own-ideology) contrasts with adjusted p < .05, excluding the
# 'overall' summary row.
coefs_basecontrol %>% filter(!str_detect(layer2_treatmentcontrast,"neutral") & p.adj < .05 & layer3_specificoutcome != 'overall')


# Significant "neutral" (moderate-respondent) contrasts, keeping only the
# algorithm contrasts: a row is dropped when its contrast string contains BOTH
# "lib" and "con", i.e. it is a conservative-vs-liberal seed contrast.
# FIX (simplification): the original condition
#   (lib & !con) | !(lib & con)
# is logically equivalent to !(lib & con) — the first disjunct is subsumed by
# the second (also under NA) — so it is written in the simplified form here.
coefs_basecontrol %>% filter(str_detect(layer2_treatmentcontrast,"neutral") & p.adj < .05 & layer3_specificoutcome != 'overall' &
                               !(str_detect(layer2_treatmentcontrast,"lib") & str_detect(layer2_treatmentcontrast,"con")))
259
+
260
## Lookup table mapping layer3_specificoutcome codes to display labels and to
## the outcome family used for facetting. Row order fixes the top-to-bottom
## display order of outcomes within each family panel below.
outcome_labels <- data.frame(outcome = c(
  "Liberal videos\nchosen (fraction)",
  "Likes & saves\nminus dislikes (#)",
  "Total watch\ntime (hrs)",
  "Policy\nindex",
  "Trust in\nmajor news",
  "Trust in\nYouTube",
  "Never fabrication\nby major news",
  "Never fabrication\nby YouTube",
  "Perceived intelligence",
  "Feeling thermometer",
  "Comfort as friend"),
  specificoutcome = c(
    "pro_fraction_chosen",
    "positive_interactions",
    "platform_duration",
    "policyindex",
    "trust_majornews_w2",
    "trust_youtube_w2",
    "fabricate_majornews_w2",
    "fabricate_youtube_w2",
    "affpol_smart_w2",
    "affpol_ft_w2",
    "affpol_comfort_w2"),
  # family strings double as facet labels; counts must sum to 11 rows
  family = c(
    rep("Platform Interaction",3),
    rep("Policy Attitudes\n(unit scale, + is more conservative)",1),
    rep("Media Trust\n(unit scale, + is more trusting)",4),
    rep("Affective Polarization\n(unit scale, + is greater polarization)",3))
)
290
+
291
##### Liberals #####
## Liberal respondents, liberal seed: 3/1 vs. 2/2 algorithm contrast, all
## outcomes except the 'overall' summary row.
coefs_third1_basecontrol <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "lib 31 - lib 22" &
           layer3_specificoutcome != "overall")

# attach display label and family from the lookup table
coefs_third1_basecontrol$outcome = outcome_labels$outcome[match(coefs_third1_basecontrol$layer3_specificoutcome,
                                                                outcome_labels$specificoutcome)]


coefs_third1_basecontrol$family = outcome_labels$family[match(coefs_third1_basecontrol$layer3_specificoutcome,
                                                              outcome_labels$specificoutcome)]


# ordered family factor fixes the top-to-bottom facet order
coefs_third1_basecontrol <- mutate(coefs_third1_basecontrol,
                                   family = factor(family,
                                                   levels = c(
                                                     "Policy Attitudes\n(unit scale, + is more conservative)",
                                                     "Platform Interaction",
                                                     "Media Trust\n(unit scale, + is more trusting)",
                                                     "Affective Polarization\n(unit scale, + is greater polarization)"),ordered = T))

## manipulate to get all unit scales:
# watch time: seconds -> hours
coefs_third1_basecontrol$est[coefs_third1_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third1_basecontrol$est[coefs_third1_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
coefs_third1_basecontrol$se[coefs_third1_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third1_basecontrol$se[coefs_third1_basecontrol$layer3_specificoutcome=="platform_duration"]/3600

# feeling thermometer: 0-100 -> 0-1
coefs_third1_basecontrol$est[coefs_third1_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third1_basecontrol$est[coefs_third1_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
coefs_third1_basecontrol$se[coefs_third1_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third1_basecontrol$se[coefs_third1_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100

## CIs, plot ordering, and significance styling. 'alpha' (TRUE when
## p.adj < .05, NA treated as FALSE) drives opacity; Sample_color collapses
## insignificant rows into a single "insig" color bucket.
## NOTE(review): plotorder = rep((nrow(.)/3):1, 3) assumes the table holds the
## same outcomes for all three studies, stacked by Sample in a consistent
## order, and errors if nrow is not divisible by 3 — confirm upstream.
coefs_third1_basecontrol <- coefs_third1_basecontrol %>%
  mutate(ci_lo_99 = est + qnorm(0.001)*se,
         ci_hi_99 = est + qnorm(0.999)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = rep((nrow(coefs_third1_basecontrol)/3):1,3),
         alpha = ifelse(p.adj<0.05, T, F),
         alpha = as.logical(alpha),
         alpha = replace_na(alpha,F),
         Sample_color = as.character(Sample),
         Sample_color = replace(Sample_color,alpha==F,"insig")
  )
# sanity check: row counts per color bucket
tabyl(coefs_third1_basecontrol,Sample_color)
334
+
335
## Coefficient plot for liberal respondents (3/1 vs. 2/2 algorithm, liberal
## seed): all outcome families in one column. Significant estimates are drawn
## opaque in the study color; insignificant ones use the faded "insig" black.
## Study names are printed as text on the policy panel in lieu of a legend.
## FIX: removed the trailing comma inside theme(...) — it passes an empty
## argument, which R rejects ("argument ... is empty") when the dots are
## collected at evaluation time.
(coefplot_third1_basecontrol <- ggplot(filter(coefs_third1_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
    geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5,alpha=0.25) +
    geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1,alpha=0.25) +
    geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3,alpha=0.25) +
    geom_text(data=filter(coefs_third1_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
    geom_hline(yintercept = 0,lty=2) +
    facet_wrap(~family,ncol=1,scales="free") +
    scale_x_continuous("",
                       breaks = coefs_third1_basecontrol$plotorder,labels = coefs_third1_basecontrol$outcome) +
    scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all liberal seed\n(95% and 90% CIs)") +
    scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
    scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
    scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
    coord_flip() +
    theme_bw(base_family = "sans") +
    theme(strip.background = element_rect(fill="white"),
          legend.position = "none")
)
# write raster and vector versions
ggsave(coefplot_third1_basecontrol,
       filename = "../results/coefplot_third1_basecontrol_3studies.png",width=5,height=8.5)
ggsave(coefplot_third1_basecontrol,
       filename = "../results/coefplot_third1_basecontrol_3studies.pdf",width=5,height=8.5)
358
+
359
## "Empty" version of the liberal coefficient plot: geom_blank draws nothing
## but reserves identical scales/panels (e.g. for build-up slides).
## NOTE(review): geom_blank() ignores the width/lwd/size parameters (warning
## only) — they appear copied over from the geom_errorbar/geom_point calls.
(coefplot_third1_basecontrol_empty <- ggplot(filter(coefs_third1_basecontrol),aes(x=plotorder,group=Sample,alpha=alpha,col=Sample)) +
    geom_blank(aes(ymin=ci_lo_95,ymax=ci_hi_95),position=position_dodge(width=0.5),width=0,lwd=0.5) +
    geom_blank(aes(ymin=ci_lo_90,ymax=ci_hi_90),position=position_dodge(width=0.5),width=0,lwd=1) +
    geom_blank(aes(y=est,shape=Sample),position=position_dodge(width=0.5),size=3) +
    geom_blank(data=filter(coefs_third1_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),position=position_dodge(width=0.5),size=3) +
    geom_hline(yintercept = 0,lty=2) +
    facet_wrap(~family,ncol=1,scales="free") +
    scale_x_continuous("",
                       breaks = coefs_third1_basecontrol$plotorder,labels = coefs_third1_basecontrol$outcome) +
    scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all liberal seed\n(95% and 90% CIs)") +
    scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
    scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
    coord_flip() +
    theme_bw(base_family = "sans") +
    theme(strip.background = element_rect(fill="white"),legend.position = "none")
)
ggsave(coefplot_third1_basecontrol_empty,
       filename = "../results/coefplot_third1_basecontrol_empty_3studies.png",width=5,height=8.5)
377
+
378
## Same as the full liberal coefficient plot, restricted to the top two facet
## families (policy attitudes and platform interaction) for a compact figure.
## FIX: removed the trailing comma inside theme(...) — it passes an empty
## argument, which R rejects ("argument ... is empty") when the dots are
## collected at evaluation time.
(coefplot_third1_basecontrol_3studies_toptwo <- ggplot(filter(coefs_third1_basecontrol,layer1_hypothesisfamily %in% c("policy","platform")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
    geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5,alpha=0.25) +
    geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1,alpha=0.25) +
    geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3,alpha=0.25) +
    geom_text(data=filter(coefs_third1_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
    geom_hline(yintercept = 0,lty=2) +
    facet_wrap(~family,ncol=1,scales="free") +
    scale_x_continuous("",
                       breaks = coefs_third1_basecontrol$plotorder,labels = coefs_third1_basecontrol$outcome) +
    scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all liberal seed\n(95% and 90% CIs)") +
    scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
    scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
    scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
    coord_flip() +
    theme_bw(base_family = "sans") +
    theme(strip.background = element_rect(fill="white"),
          legend.position = "none")
)
ggsave(coefplot_third1_basecontrol_3studies_toptwo,
       filename = "../results/coefplot_third1_basecontrol_3studies_toptwo.png",width=5,height=4.75)
ggsave(coefplot_third1_basecontrol_3studies_toptwo,
       filename = "../results/coefplot_third1_basecontrol_3studies_toptwo.pdf",width=5,height=4.75)
401
+
402
+
403
##### Conservatives #####

## Conservative respondents, conservative seed: 3/1 vs. 2/2 algorithm
## contrast. Mirrors the Liberals data-prep block above.
coefs_third3_basecontrol <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "con 31 - con 22" &
           layer3_specificoutcome != "overall")

# attach display label and family from the lookup table
coefs_third3_basecontrol$outcome = outcome_labels$outcome[match(coefs_third3_basecontrol$layer3_specificoutcome,
                                                                outcome_labels$specificoutcome)]

coefs_third3_basecontrol$family = outcome_labels$family[match(coefs_third3_basecontrol$layer3_specificoutcome,
                                                              outcome_labels$specificoutcome)]

# ordered family factor fixes the top-to-bottom facet order
coefs_third3_basecontrol <- mutate(coefs_third3_basecontrol,
                                   family = factor(family,levels = c("Policy Attitudes\n(unit scale, + is more conservative)","Platform Interaction","Media Trust\n(unit scale, + is more trusting)","Affective Polarization\n(unit scale, + is greater polarization)"),ordered = T))

## manipulate to get all unit scales:
# watch time: seconds -> hours
coefs_third3_basecontrol$est[coefs_third3_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third3_basecontrol$est[coefs_third3_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
coefs_third3_basecontrol$se[coefs_third3_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third3_basecontrol$se[coefs_third3_basecontrol$layer3_specificoutcome=="platform_duration"]/3600

# feeling thermometer: 0-100 -> 0-1
coefs_third3_basecontrol$est[coefs_third3_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third3_basecontrol$est[coefs_third3_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
coefs_third3_basecontrol$se[coefs_third3_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third3_basecontrol$se[coefs_third3_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100

## CIs, plot ordering, and significance styling.
## NOTE(review): plotorder = rep((nrow(.)/3):1, 3) assumes the same outcomes
## for all three studies, stacked by Sample in consistent order, and errors
## if nrow is not divisible by 3 — confirm upstream.
coefs_third3_basecontrol <- coefs_third3_basecontrol %>%
  mutate(ci_lo_99 = est + qnorm(0.001)*se,
         ci_hi_99 = est + qnorm(0.999)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = rep((nrow(coefs_third3_basecontrol)/3):1,3),
         alpha = ifelse(p.adj<0.05, T, F),
         alpha = as.logical(alpha),
         alpha = replace_na(alpha,F),
         Sample_color = as.character(Sample),
         Sample_color = replace(Sample_color,alpha==F,"insig")
  )
439
+
440
+
441
## Coefficient plot for conservative respondents (3/1 vs. 2/2 algorithm,
## conservative seed). Unlike the liberal plot, the error bars/points here
## do not hard-code alpha, so the aes(alpha) mapping + scale_alpha_manual
## actually drives the fading of insignificant estimates.
(coefplot_third3_basecontrol <- ggplot(filter(coefs_third3_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
    geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
    geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
    geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
    geom_text(data=filter(coefs_third3_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
    geom_hline(yintercept = 0,lty=2) +
    facet_wrap(~family,ncol=1,scales="free") +
    scale_x_continuous("",
                       breaks = coefs_third3_basecontrol$plotorder,labels = coefs_third3_basecontrol$outcome) +
    scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all conservative seed\n(95% and 90% CIs)") +
    scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
    scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
    scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
    coord_flip() +
    theme_bw(base_family = "sans") +
    theme(strip.background = element_rect(fill="white"),legend.position="none")
)

# write raster and vector versions
ggsave(coefplot_third3_basecontrol,
       filename = "../results/coefplot_third3_basecontrol_3studies.png",width=5,height=8.5)
ggsave(coefplot_third3_basecontrol,
       filename = "../results/coefplot_third3_basecontrol_3studies.pdf",width=5,height=8.5)
463
+
464
## "Empty" version of the conservative coefficient plot: geom_blank draws
## nothing but reserves the panel layout (y zoomed to [-0.17, 0.17]).
## NOTE(review): geom_blank() ignores width/lwd/size (warning only), and the
## col=ifelse(...) mapping is inert since nothing is drawn.
(coefplot_third3_basecontrol_empty <- ggplot(filter(coefs_third3_basecontrol),aes(x=plotorder,group=Sample,col=ifelse(p.adj<0.05,T,F))) +
    geom_blank(aes(ymin=ci_lo_95,ymax=ci_hi_95),position=position_dodge(width=0.5),width=0,lwd=0.5) +
    geom_blank(aes(ymin=ci_lo_90,ymax=ci_hi_90),position=position_dodge(width=0.5),width=0,lwd=1) +
    geom_blank(aes(y=est,shape=Sample),position=position_dodge(width=0.5),size=2) +
    geom_blank(data=filter(coefs_third3_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),position=position_dodge(width=0.5),size=3) +
    geom_hline(yintercept = 0,lty=2) +
    facet_wrap(~family,ncol=1,scales="free") +
    scale_x_continuous("",
                       breaks = coefs_third3_basecontrol$plotorder,labels = coefs_third3_basecontrol$outcome) +
    scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all conservative seed\n(95% and 90% CIs)") +
    scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
    scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
    coord_flip(ylim=c(-0.17,0.17)) +
    theme_bw(base_family = "sans") +
    theme(strip.background = element_rect(fill="white"),legend.position="none")
)
ggsave(coefplot_third3_basecontrol_empty,
       filename = "../results/coefplot_third3_basecontrol_empty_3studies.png",width=5,height=8.5)
482
+
483
## Conservative coefficient plot restricted to the top two facet families
## (policy attitudes and platform interaction) for a compact figure.
(coefplot_third3_basecontrol_toptwo <- ggplot(filter(coefs_third3_basecontrol,layer1_hypothesisfamily %in% c("policy","platform")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
    geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
    geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
    geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
    geom_text(data=filter(coefs_third3_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
    geom_hline(yintercept = 0,lty=2) +
    facet_wrap(~family,ncol=1,scales="free") +
    scale_x_continuous("",
                       breaks = coefs_third3_basecontrol$plotorder,labels = coefs_third3_basecontrol$outcome) +
    scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all conservative seed\n(95% and 90% CIs)") +
    scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
    scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
    scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
    coord_flip() +
    theme_bw(base_family = "sans") +
    theme(strip.background = element_rect(fill="white"),legend.position="none")
)

# write raster and vector versions
ggsave(coefplot_third3_basecontrol_toptwo,
       filename = "../results/coefplot_third3_basecontrol_3studies_toptwo.png",width=5,height=4.75)
ggsave(coefplot_third3_basecontrol_toptwo,
       filename = "../results/coefplot_third3_basecontrol_3studies_toptwo.pdf",width=5,height=4.75)
505
+
506
+
507
+ ##### Moderates (algorithm) #####
508
+
509
+ ## Subset estimates to the 3/1-vs-2/2 algorithm contrast among liberal-seed moderates,
+ ## dropping the "overall" index row.
+ coefs_third2_pro_basecontrol <- coefs_basecontrol %>%
510
+ filter(layer2_treatmentcontrast == "neutral lib 31 - neutral lib 22" &
511
+ layer3_specificoutcome != "overall")
512
+
513
+ ## Attach human-readable outcome and family labels by lookup into outcome_labels.
+ coefs_third2_pro_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_pro_basecontrol$layer3_specificoutcome,
514
+ outcome_labels$specificoutcome)]
515
+
516
+ coefs_third2_pro_basecontrol$family = outcome_labels$family[match(coefs_third2_pro_basecontrol$layer3_specificoutcome,
517
+ outcome_labels$specificoutcome)]
518
+
519
+ ## Ordered factor fixes the facet (panel) order in the plots below.
+ coefs_third2_pro_basecontrol <- mutate(coefs_third2_pro_basecontrol,
520
+ family = factor(family,levels = c("Policy Attitudes\n(unit scale, + is more conservative)","Platform Interaction","Media Trust\n(unit scale, + is more trusting)","Affective Polarization\n(unit scale, + is greater polarization)"),ordered = T))
521
+
522
+ ## manipulate to get all unit scales:
+ ## platform_duration divided by 3600 -- presumably seconds -> hours; affpol_ft_w2 divided
+ ## by 100 -- presumably a 0-100 feeling thermometer -> unit scale. TODO confirm units.
523
+ coefs_third2_pro_basecontrol$est[coefs_third2_pro_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_pro_basecontrol$est[coefs_third2_pro_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
524
+ coefs_third2_pro_basecontrol$se[coefs_third2_pro_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_pro_basecontrol$se[coefs_third2_pro_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
525
+
526
+ coefs_third2_pro_basecontrol$est[coefs_third2_pro_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_pro_basecontrol$est[coefs_third2_pro_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
527
+ coefs_third2_pro_basecontrol$se[coefs_third2_pro_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_pro_basecontrol$se[coefs_third2_pro_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
528
+
529
+ ## Normal-approximation CIs from est/se; plotorder counts down so outcomes read top-to-bottom
+ ## after coord_flip (rep(...,3) assumes three stacked studies per outcome).
+ ## NOTE(review): the "_99" bounds use qnorm(0.001)/qnorm(0.999), i.e. a 99.8% interval,
+ ## not 99% -- confirm intended. The alpha chain coerces NA p.adj to FALSE (insignificant).
+ coefs_third2_pro_basecontrol <- coefs_third2_pro_basecontrol %>%
530
+ mutate(ci_lo_99 = est + qnorm(0.001)*se,
531
+ ci_hi_99 = est + qnorm(0.999)*se,
532
+ ci_lo_95 = est + qnorm(0.025)*se,
533
+ ci_hi_95 = est + qnorm(0.975)*se,
534
+ ci_lo_90 = est + qnorm(0.05)*se,
535
+ ci_hi_90 = est + qnorm(0.95)*se,
536
+ plotorder = rep((nrow(coefs_third2_pro_basecontrol)/3):1,3),
537
+ alpha = ifelse(p.adj<0.05, T, F),
538
+ alpha = as.logical(alpha),
539
+ alpha = replace_na(alpha,F),
540
+ Sample_color = as.character(Sample),
541
+ Sample_color = replace(Sample_color,alpha==F,"insig")
542
+ )
543
+ ## Write the YouGov duration effect (hours*60 = minutes, 1 dp, absolute value) as a TeX
+ ## snippet; sep="%" terminates the file with a TeX comment char instead of a newline.
+ writeLines(as.character(abs(round(filter(coefs_third2_pro_basecontrol,layer3_specificoutcome=="platform_duration" & Sample=="Minimum Wage\n(YouGov)")$est*60,1))),
544
+ con = "../results/beta_minutes_recsys_duration_third2_proseed_study3.tex",sep="%")
545
+
546
+ ## Dodged coefficient plot: thin error bars = 95% CI, thick = 90% CI; non-significant
+ ## estimates are faded (alpha) and recolored black via Sample_color=="insig". Outer
+ ## parentheses print the plot while assigning. Study labels are drawn as text only in
+ ## the policy panel. NOTE(review): filter() here has no condition and is a no-op.
+ (coefplot_third2_pro_basecontrol <- ggplot(filter(coefs_third2_pro_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
547
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
548
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
549
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
550
+ geom_text(data=filter(coefs_third2_pro_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
551
+ geom_hline(yintercept = 0,lty=2) +
552
+ facet_wrap(~family,ncol=1,scales="free") +
553
+ scale_x_continuous("",
554
+ breaks = coefs_third2_pro_basecontrol$plotorder,labels = coefs_third2_pro_basecontrol$outcome) +
555
+ scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all liberal seed\n(95% and 90% CIs)") +
556
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
557
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
558
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
559
+ coord_flip() +
560
+ theme_bw(base_family = "sans") +
561
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
562
+ )
563
+ ggsave(coefplot_third2_pro_basecontrol,
564
+ filename = "../results/coefplot_third2_pro_basecontrol_3studies.png",width=5,height=8.5)
565
+ ggsave(coefplot_third2_pro_basecontrol,
566
+ filename = "../results/coefplot_third2_pro_basecontrol_3studies.pdf",width=5,height=8.5)
567
+
568
+ ## "Empty" variant: identical axes, facets, and scales, but every geom is geom_blank so
+ ## nothing is drawn -- presumably a build/animation frame for presentations; confirm use.
+ ## NOTE(review): width=, lwd=, and size= are not geom_blank parameters (ignored with a warning).
+ (coefplot_third2_pro_basecontrol_empty <- ggplot(filter(coefs_third2_pro_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
569
+ geom_blank(aes(ymin=ci_lo_95,ymax=ci_hi_95),position=position_dodge(width=0.5),width=0,lwd=0.5) +
570
+ geom_blank(aes(ymin=ci_lo_90,ymax=ci_hi_90),position=position_dodge(width=0.5),width=0,lwd=1) +
571
+ geom_blank(aes(y=est,shape=Sample),position=position_dodge(width=0.5),size=3) +
572
+ geom_blank(data=filter(coefs_third2_pro_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),position=position_dodge(width=0.5),size=3) +
573
+ geom_hline(yintercept = 0,lty=2) +
574
+ facet_wrap(~family,ncol=1,scales="free") +
575
+ scale_x_continuous("",
576
+ breaks = coefs_third2_pro_basecontrol$plotorder,labels = coefs_third2_pro_basecontrol$outcome) +
577
+ scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all liberal seed\n(95% and 90% CIs)") +
578
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
579
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
580
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
581
+ coord_flip() +
582
+ theme_bw(base_family = "sans") +
583
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
584
+ )
585
+ ggsave(coefplot_third2_pro_basecontrol_empty,
586
+ filename = "../results/coefplot_third2_pro_basecontrol_empty_3studies.png",width=5,height=8.5)
587
+
588
+ ## Same plot restricted to the policy and platform hypothesis families ("top two" panels);
+ ## saved at reduced height (4.75 vs 8.5) to match the two-panel layout.
+ (coefplot_third2_pro_basecontrol_toptwo <- ggplot(filter(coefs_third2_pro_basecontrol,layer1_hypothesisfamily %in% c("policy","platform")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
589
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
590
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
591
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
592
+ geom_text(data=filter(coefs_third2_pro_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
593
+ geom_hline(yintercept = 0,lty=2) +
594
+ facet_wrap(~family,ncol=1,scales="free") +
595
+ scale_x_continuous("",
596
+ breaks = coefs_third2_pro_basecontrol$plotorder,labels = coefs_third2_pro_basecontrol$outcome) +
597
+ scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all liberal seed\n(95% and 90% CIs)") +
598
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
599
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
600
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
601
+ coord_flip() +
602
+ theme_bw(base_family = "sans") +
603
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
604
+ )
605
+ ggsave(coefplot_third2_pro_basecontrol_toptwo,
606
+ filename = "../results/coefplot_third2_pro_basecontrol_3studies_toptwo.png",width=5,height=4.75)
607
+ ggsave(coefplot_third2_pro_basecontrol_toptwo,
608
+ filename = "../results/coefplot_third2_pro_basecontrol_3studies_toptwo.pdf",width=5,height=4.75)
609
+
610
+ ## Mirror of the pro-seed pipeline for the conservative-seed moderates: 3/1 vs 2/2
+ ## algorithm contrast, labels joined from outcome_labels, outcomes rescaled to unit scales.
+ coefs_third2_anti_basecontrol <- coefs_basecontrol %>%
611
+ filter(layer2_treatmentcontrast == "neutral con 31 - neutral con 22" &
612
+ layer3_specificoutcome != "overall")
613
+
614
+ coefs_third2_anti_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_anti_basecontrol$layer3_specificoutcome,
615
+ outcome_labels$specificoutcome)]
616
+
617
+ coefs_third2_anti_basecontrol$family = outcome_labels$family[match(coefs_third2_anti_basecontrol$layer3_specificoutcome,
618
+ outcome_labels$specificoutcome)]
619
+
620
+ ## Ordered factor fixes the facet (panel) order in the plots below.
+ coefs_third2_anti_basecontrol <- mutate(coefs_third2_anti_basecontrol,
621
+ family = factor(family,levels = c("Policy Attitudes\n(unit scale, + is more conservative)","Platform Interaction","Media Trust\n(unit scale, + is more trusting)","Affective Polarization\n(unit scale, + is greater polarization)"),ordered = T))
622
+
623
+ ## manipulate to get all unit scales:
+ ## /3600 presumably seconds -> hours; /100 presumably 0-100 thermometer -> unit scale.
624
+ coefs_third2_anti_basecontrol$est[coefs_third2_anti_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_anti_basecontrol$est[coefs_third2_anti_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
625
+ coefs_third2_anti_basecontrol$se[coefs_third2_anti_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_anti_basecontrol$se[coefs_third2_anti_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
626
+
627
+ coefs_third2_anti_basecontrol$est[coefs_third2_anti_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_anti_basecontrol$est[coefs_third2_anti_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
628
+ coefs_third2_anti_basecontrol$se[coefs_third2_anti_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_anti_basecontrol$se[coefs_third2_anti_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
629
+
630
+ ## Normal-approximation CIs; NOTE(review): "_99" bounds use the 0.001/0.999 quantiles
+ ## (a 99.8% interval). The alpha chain coerces NA p.adj to FALSE.
+ coefs_third2_anti_basecontrol <- coefs_third2_anti_basecontrol %>%
631
+ mutate(ci_lo_99 = est + qnorm(0.001)*se,
632
+ ci_hi_99 = est + qnorm(0.999)*se,
633
+ ci_lo_95 = est + qnorm(0.025)*se,
634
+ ci_hi_95 = est + qnorm(0.975)*se,
635
+ ci_lo_90 = est + qnorm(0.05)*se,
636
+ ci_hi_90 = est + qnorm(0.95)*se,
637
+ plotorder = rep((nrow(coefs_third2_anti_basecontrol)/3):1,3),
638
+ alpha = ifelse(p.adj<0.05, T, F),
639
+ alpha = as.logical(alpha),
640
+ alpha = replace_na(alpha,F),
641
+ Sample_color = as.character(Sample),
642
+ Sample_color = replace(Sample_color,alpha==F,"insig")
643
+ )
644
+
645
+ ## Write the Gun Control (MTurk) duration effect in minutes (hours*60) as a TeX snippet;
+ ## sep="%" terminates the file with a TeX comment char instead of a newline.
+ writeLines(as.character(abs(round(filter(coefs_third2_anti_basecontrol,layer3_specificoutcome=="platform_duration" & Sample=="Gun Control\n(MTurk)")$est*60,1))),
646
+ con = "../results/beta_minutes_recsys_duration_third2_antiseed_study1.tex",sep="%")
647
+
648
+ ## Dodged coefficient plot for the conservative-seed algorithm contrast (thin = 95% CI,
+ ## thick = 90% CI; insignificant estimates faded/black). NOTE(review): filter() is a no-op.
+ (coefplot_third2_anti_basecontrol <- ggplot(filter(coefs_third2_anti_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
649
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
650
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
651
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
652
+ geom_text(data=filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
653
+ geom_hline(yintercept = 0,lty=2) +
654
+ facet_wrap(~family,ncol=1,scales="free") +
655
+ scale_x_continuous("",
656
+ breaks = coefs_third2_anti_basecontrol$plotorder,labels = coefs_third2_anti_basecontrol$outcome) +
657
+ scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all conservative seed\n(95% and 90% CIs)") +
658
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
659
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
660
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
661
+ coord_flip() +
662
+ theme_bw(base_family = "sans") +
663
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
664
+ )
665
+ ggsave(coefplot_third2_anti_basecontrol,
666
+ filename = "../results/coefplot_third2_anti_basecontrol_3studies.png",width=5,height=8.5)
667
+ ggsave(coefplot_third2_anti_basecontrol,
668
+ filename = "../results/coefplot_third2_anti_basecontrol_3studies.pdf",width=5,height=8.5)
669
+
670
+ ## "Empty" variant (geom_blank draws nothing; only axes/facets render).
+ ## NOTE(review): col=ifelse(p.adj<0.05,T,F) maps color to TRUE/FALSE, which does not match
+ ## the scale_color_manual breaks, and differs from the pro-seed empty plot's
+ ## col=Sample,alpha=alpha -- harmless here since nothing is drawn, but inconsistent.
+ ## geom_blank also ignores width=/lwd=/size=.
+ (coefplot_third2_anti_basecontrol_empty <- ggplot(filter(coefs_third2_anti_basecontrol),aes(x=plotorder,group=Sample,col=ifelse(p.adj<0.05,T,F))) +
671
+ geom_blank(aes(ymin=ci_lo_95,ymax=ci_hi_95),position=position_dodge(width=0.5),width=0,lwd=0.5) +
672
+ geom_blank(aes(ymin=ci_lo_90,ymax=ci_hi_90),position=position_dodge(width=0.5),width=0,lwd=1) +
673
+ geom_blank(aes(y=est,shape=Sample),position=position_dodge(width=0.5),size=2) +
674
+ geom_hline(yintercept = 0,lty=2) +
675
+ facet_wrap(~family,ncol=1,scales="free") +
676
+ scale_x_continuous("",
677
+ breaks = coefs_third2_anti_basecontrol$plotorder,labels = coefs_third2_anti_basecontrol$outcome) +
678
+ scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all conservative seed\n(95% and 90% CIs)") +
679
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
680
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
681
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
682
+ coord_flip() +
683
+ theme_bw(base_family = "sans") +
684
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
685
+ )
686
+ ggsave(coefplot_third2_anti_basecontrol_empty,
687
+ filename = "../results/coefplot_third2_anti_basecontrol_empty_3studies.png",width=5,height=8.5)
688
+
689
+ ## Policy + platform families only ("top two" panels), saved at reduced height.
+ (coefplot_third2_anti_basecontrol_toptwo <- ggplot(filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily %in% c("policy","platform")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
690
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
691
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
692
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
693
+ geom_text(data=filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
694
+ geom_hline(yintercept = 0,lty=2) +
695
+ facet_wrap(~family,ncol=1,scales="free") +
696
+ scale_x_continuous("",
697
+ breaks = coefs_third2_anti_basecontrol$plotorder,labels = coefs_third2_anti_basecontrol$outcome) +
698
+ scale_y_continuous("Treatment effect of 3/1 vs. 2/2\nalgorithm, all conservative seed\n(95% and 90% CIs)") +
699
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
700
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
701
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
702
+ coord_flip() +
703
+ theme_bw(base_family = "sans") +
704
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
705
+ )
706
+ ggsave(coefplot_third2_anti_basecontrol_toptwo,
707
+ filename = "../results/coefplot_third2_anti_basecontrol_3studies_toptwo.png",width=5,height=4.75)
708
+ ggsave(coefplot_third2_anti_basecontrol_toptwo,
709
+ filename = "../results/coefplot_third2_anti_basecontrol_3studies_toptwo.pdf",width=5,height=4.75)
710
+
711
+
712
+ ##### Moderates (seed) #####
713
+ ## Seed-video contrast (conservative vs liberal seed) holding the algorithm at 3/1;
+ ## same prep pipeline as the algorithm contrasts above.
+ coefs_third2_31_basecontrol <- coefs_basecontrol %>%
714
+ filter(layer2_treatmentcontrast == "neutral con 31 - neutral lib 31" &
715
+ layer3_specificoutcome != "overall")
716
+
717
+ coefs_third2_31_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_31_basecontrol$layer3_specificoutcome,
718
+ outcome_labels$specificoutcome)]
719
+
720
+ coefs_third2_31_basecontrol$family = outcome_labels$family[match(coefs_third2_31_basecontrol$layer3_specificoutcome,
721
+ outcome_labels$specificoutcome)]
722
+
723
+ ## Ordered factor fixes the facet (panel) order in the plots below.
+ coefs_third2_31_basecontrol <- mutate(coefs_third2_31_basecontrol,
724
+ family = factor(family,levels = c("Policy Attitudes\n(unit scale, + is more conservative)","Platform Interaction","Media Trust\n(unit scale, + is more trusting)","Affective Polarization\n(unit scale, + is greater polarization)"),ordered = T))
725
+
726
+ ## manipulate to get all unit scales:
+ ## /3600 presumably seconds -> hours; /100 presumably 0-100 thermometer -> unit scale.
727
+ coefs_third2_31_basecontrol$est[coefs_third2_31_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_31_basecontrol$est[coefs_third2_31_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
728
+ coefs_third2_31_basecontrol$se[coefs_third2_31_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_31_basecontrol$se[coefs_third2_31_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
729
+
730
+ coefs_third2_31_basecontrol$est[coefs_third2_31_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_31_basecontrol$est[coefs_third2_31_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
731
+ coefs_third2_31_basecontrol$se[coefs_third2_31_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_31_basecontrol$se[coefs_third2_31_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
732
+
733
+ ## Normal-approximation CIs; NOTE(review): "_99" bounds use the 0.001/0.999 quantiles
+ ## (a 99.8% interval). The alpha chain coerces NA p.adj to FALSE.
+ coefs_third2_31_basecontrol <- coefs_third2_31_basecontrol %>%
734
+ mutate(ci_lo_99 = est + qnorm(0.001)*se,
735
+ ci_hi_99 = est + qnorm(0.999)*se,
736
+ ci_lo_95 = est + qnorm(0.025)*se,
737
+ ci_hi_95 = est + qnorm(0.975)*se,
738
+ ci_lo_90 = est + qnorm(0.05)*se,
739
+ ci_hi_90 = est + qnorm(0.95)*se,
740
+ plotorder = rep((nrow(coefs_third2_31_basecontrol)/3):1,3),
741
+ alpha = ifelse(p.adj<0.05, T, F),
742
+ alpha = as.logical(alpha),
743
+ alpha = replace_na(alpha,F),
744
+ Sample_color = as.character(Sample),
745
+ Sample_color = replace(Sample_color,alpha==F,"insig")
746
+ )
747
+
748
+ ## Invisible points at est = +/-0.5 widen the Platform Interaction panel's y-range when
+ ## supplied to geom_blank below (same dummy_df is re-created verbatim before the toptwo plot).
+ dummy_df <- data.frame(family=c("Platform Interaction","Platform Interaction"),est=c(-0.5,0.5),plotorder=c(9,9),Sample=c("Gun Control\n(MTurk)","Gun Control\n(MTurk)"),alpha=c(FALSE,FALSE)) %>% mutate(family=factor(family))
749
+
750
+ ## Seed-contrast coefficient plot (3/1 algorithm); geom_blank(dummy_df) pads the Platform
+ ## Interaction panel's y-range to +/-0.5. NOTE(review): filter() is a no-op.
+ (coefplot_third2_31_basecontrol <- ggplot(filter(coefs_third2_31_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
751
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
752
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
753
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
754
+ geom_blank(data=dummy_df,aes(y=est)) +
755
+ geom_text(data=filter(coefs_third2_31_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
756
+ geom_hline(yintercept = 0,lty=2) +
757
+ facet_wrap(~family,ncol=1,scales="free") +
758
+ # facet_grid(rows = "family",scales="free",space = "free_y",switch = "y") +
759
+ scale_x_continuous("",
760
+ breaks = coefs_third2_31_basecontrol$plotorder,labels = coefs_third2_31_basecontrol$outcome) +
761
+ scale_y_continuous("Treatment effect of conservative seed vs.\nliberal seed video, all 3/1 algorithm\n(95% and 90% CIs)") +
762
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
763
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
764
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
765
+ coord_flip() +
766
+ # coord_flip(ylim=c(-0.3,0.3)) +
767
+ theme_bw(base_family = "sans") +
768
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
769
+ )
770
+ ggsave(coefplot_third2_31_basecontrol,
771
+ filename = "../results/coefplot_third2_31_basecontrol_3studies.png",width=5,height=8.5)
772
+ ggsave(coefplot_third2_31_basecontrol,
773
+ filename = "../results/coefplot_third2_31_basecontrol_3studies.pdf",width=5,height=8.5)
774
+
775
+ ## "Empty" variant (geom_blank only). Unlike the others this one clamps the axis with
+ ## coord_flip(ylim=c(-0.3,0.3)) and keeps a bottom legend.
+ ## NOTE(review): col=ifelse(p.adj<0.05,T,F) does not match the scale_color_manual breaks --
+ ## harmless since nothing is drawn. geom_blank ignores width=/lwd=/size=.
+ (coefplot_third2_31_basecontrol_empty <- ggplot(filter(coefs_third2_31_basecontrol),aes(x=plotorder,group=Sample,col=ifelse(p.adj<0.05,T,F))) +
776
+ geom_blank(aes(ymin=ci_lo_95,ymax=ci_hi_95),position=position_dodge(width=0.5),width=0,lwd=0.5) +
777
+ geom_blank(aes(ymin=ci_lo_90,ymax=ci_hi_90),position=position_dodge(width=0.5),width=0,lwd=1) +
778
+ geom_blank(aes(y=est,shape=Sample),position=position_dodge(width=0.5),size=2) +
779
+ geom_hline(yintercept = 0,lty=2) +
780
+ facet_wrap(~family,ncol=1,scales="free") +
781
+ scale_x_continuous("",
782
+ breaks = coefs_third2_31_basecontrol$plotorder,labels = coefs_third2_31_basecontrol$outcome) +
783
+ scale_y_continuous("Treatment effect of conservative seed vs.\nliberal seed video, all 3/1 algorithm\n(95% and 90% CIs)") +
784
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
785
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
786
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
787
+ coord_flip(ylim=c(-0.3,0.3)) +
788
+ theme_bw(base_family = "sans") +
789
+ theme(strip.background = element_rect(fill="white"),legend.position="bottom",legend.margin = margin(0,0,0,-3,"lines"))
790
+ )
791
+ ggsave(coefplot_third2_31_basecontrol_empty,
792
+ filename = "../results/coefplot_third2_31_basecontrol_empty_3studies.png",width=5,height=8.5)
793
+
794
+
795
+ # create DF to set axis limits:
+ ## (identical to the dummy_df defined above the full 3/1 plot; redefined here so this
+ ## section stands alone -- redundant but harmless)
796
+ dummy_df <- data.frame(family=c("Platform Interaction","Platform Interaction"),est=c(-0.5,0.5),plotorder=c(9,9),Sample=c("Gun Control\n(MTurk)","Gun Control\n(MTurk)"),alpha=c(FALSE,FALSE)) %>% mutate(family=factor(family))
797
+
798
+ ## Policy + platform families only; geom_blank(dummy_df) pads the Platform Interaction
+ ## panel's y-range to +/-0.5. Saved at reduced height for the two-panel layout.
+ (coefplot_third2_31_basecontrol_toptwo <- ggplot(filter(coefs_third2_31_basecontrol,layer1_hypothesisfamily %in% c("policy","platform")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
799
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
800
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
801
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
802
+ geom_blank(data=dummy_df,aes(y=est)) +
803
+ geom_text(data=filter(coefs_third2_31_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
804
+ geom_hline(yintercept = 0,lty=2) +
805
+ facet_wrap(~family,ncol=1,scales="free") +
806
+ # facet_grid(rows = "family",scales="free",space = "free_y",switch = "y") +
807
+ scale_x_continuous("",
808
+ breaks = coefs_third2_31_basecontrol$plotorder,labels = coefs_third2_31_basecontrol$outcome) +
809
+ scale_y_continuous("Treatment effect of conservative seed vs.\nliberal seed video, all 3/1 algorithm\n(95% and 90% CIs)") +
810
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
811
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
812
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
813
+ coord_flip() +
814
+ # coord_flip(ylim=c(-0.4,0.4)) +
815
+ theme_bw(base_family = "sans") +
816
+ theme(strip.background = element_rect(fill="white"),legend.position="none",plot.margin = margin(5,10,5,5))
817
+ )
818
+ ggsave(coefplot_third2_31_basecontrol_toptwo,
819
+ filename = "../results/coefplot_third2_31_basecontrol_3studies_toptwo.png",width=5,height=4.75)
820
+ ggsave(coefplot_third2_31_basecontrol_toptwo,
821
+ filename = "../results/coefplot_third2_31_basecontrol_3studies_toptwo.pdf",width=5,height=4.75)
822
+
823
+
824
+
825
+ ## Seed-video contrast holding the algorithm at 2/2; same prep pipeline as the 3/1 version.
+ coefs_third2_22_basecontrol <- coefs_basecontrol %>%
826
+ filter(layer2_treatmentcontrast == "neutral con 22 - neutral lib 22" &
827
+ layer3_specificoutcome != "overall")
828
+
829
+ coefs_third2_22_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_22_basecontrol$layer3_specificoutcome,
830
+ outcome_labels$specificoutcome)]
831
+
832
+ coefs_third2_22_basecontrol$family = outcome_labels$family[match(coefs_third2_22_basecontrol$layer3_specificoutcome,
833
+ outcome_labels$specificoutcome)]
834
+
835
+ ## Ordered factor fixes the facet (panel) order in the plots below.
+ coefs_third2_22_basecontrol <- mutate(coefs_third2_22_basecontrol,
836
+ family = factor(family,levels = c("Policy Attitudes\n(unit scale, + is more conservative)","Platform Interaction","Media Trust\n(unit scale, + is more trusting)","Affective Polarization\n(unit scale, + is greater polarization)"),ordered = T))
837
+
838
+ ## manipulate to get all unit scales:
+ ## /3600 presumably seconds -> hours; /100 presumably 0-100 thermometer -> unit scale.
839
+ coefs_third2_22_basecontrol$est[coefs_third2_22_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_22_basecontrol$est[coefs_third2_22_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
840
+ coefs_third2_22_basecontrol$se[coefs_third2_22_basecontrol$layer3_specificoutcome=="platform_duration"] <- coefs_third2_22_basecontrol$se[coefs_third2_22_basecontrol$layer3_specificoutcome=="platform_duration"]/3600
841
+
842
+ coefs_third2_22_basecontrol$est[coefs_third2_22_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_22_basecontrol$est[coefs_third2_22_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
843
+ coefs_third2_22_basecontrol$se[coefs_third2_22_basecontrol$layer3_specificoutcome=="affpol_ft_w2"] <- coefs_third2_22_basecontrol$se[coefs_third2_22_basecontrol$layer3_specificoutcome=="affpol_ft_w2"]/100
844
+
845
+ ## Normal-approximation CIs; NOTE(review): "_99" bounds use the 0.001/0.999 quantiles
+ ## (a 99.8% interval). The alpha chain coerces NA p.adj to FALSE.
+ coefs_third2_22_basecontrol <- coefs_third2_22_basecontrol %>%
846
+ mutate(ci_lo_99 = est + qnorm(0.001)*se,
847
+ ci_hi_99 = est + qnorm(0.999)*se,
848
+ ci_lo_95 = est + qnorm(0.025)*se,
849
+ ci_hi_95 = est + qnorm(0.975)*se,
850
+ ci_lo_90 = est + qnorm(0.05)*se,
851
+ ci_hi_90 = est + qnorm(0.95)*se,
852
+ plotorder = rep((nrow(coefs_third2_22_basecontrol)/3):1,3),
853
+ alpha = ifelse(p.adj<0.05, T, F),
854
+ alpha = as.logical(alpha),
855
+ alpha = replace_na(alpha,F),
856
+ Sample_color = as.character(Sample),
857
+ Sample_color = replace(Sample_color,alpha==F,"insig")
858
+ )
859
+
860
+ ## Seed-contrast coefficient plot (2/2 algorithm); thin = 95% CI, thick = 90% CI,
+ ## insignificant estimates faded/black. NOTE(review): filter() is a no-op.
+ (coefplot_third2_22_basecontrol <- ggplot(filter(coefs_third2_22_basecontrol),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
861
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
862
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
863
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
864
+ geom_text(data=filter(coefs_third2_22_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
865
+ geom_hline(yintercept = 0,lty=2) +
866
+ facet_wrap(~family,ncol=1,scales="free") +
867
+ scale_x_continuous("",
868
+ breaks = coefs_third2_22_basecontrol$plotorder,labels = coefs_third2_22_basecontrol$outcome) +
869
+ scale_y_continuous("Treatment effect of conservative seed vs.\nliberal seed video, all 2/2 algorithm\n(95% and 90% CIs)") +
870
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
871
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
872
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
873
+ coord_flip() +
874
+ theme_bw(base_family = "sans") +
875
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
876
+ )
877
+ ggsave(coefplot_third2_22_basecontrol,
878
+ filename = "../results/coefplot_third2_22_basecontrol_3studies.png",width=5,height=8.5)
879
+ ggsave(coefplot_third2_22_basecontrol,
880
+ filename = "../results/coefplot_third2_22_basecontrol_3studies.pdf",width=5,height=8.5)
881
+
882
+ ## "Empty" variant (geom_blank only), axis clamped to +/-0.6 and bottom legend kept.
+ ## NOTE(review): col=ifelse(p.adj<0.05,T,F) does not match the scale_color_manual breaks --
+ ## harmless since nothing is drawn. geom_blank ignores width=/lwd=/size=.
+ (coefplot_third2_22_basecontrol_empty <- ggplot(filter(coefs_third2_22_basecontrol),aes(x=plotorder,group=Sample,col=ifelse(p.adj<0.05,T,F))) +
883
+ geom_blank(aes(ymin=ci_lo_95,ymax=ci_hi_95),position=position_dodge(width=0.5),width=0,lwd=0.5) +
884
+ geom_blank(aes(ymin=ci_lo_90,ymax=ci_hi_90),position=position_dodge(width=0.5),width=0,lwd=1) +
885
+ geom_blank(aes(y=est,shape=Sample),position=position_dodge(width=0.5),size=2) +
886
+ geom_hline(yintercept = 0,lty=2) +
887
+ facet_wrap(~family,ncol=1,scales="free") +
888
+ scale_x_continuous("",
889
+ breaks = coefs_third2_22_basecontrol$plotorder,labels = coefs_third2_22_basecontrol$outcome) +
890
+ scale_y_continuous("Treatment effect of conservative seed vs.\nliberal seed video, all 2/2 algorithm\n(95% and 90% CIs)") +
891
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
892
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
893
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
894
+ coord_flip(ylim=c(-0.6,0.6)) +
895
+ theme_bw(base_family = "sans") +
896
+ theme(strip.background = element_rect(fill="white"),legend.position="bottom",legend.margin = margin(0,0,0,-3,"lines"))
897
+ )
898
+ ggsave(coefplot_third2_22_basecontrol_empty,
899
+ filename = "../results/coefplot_third2_22_basecontrol_empty_3studies.png",width=5,height=8.5)
900
+
901
+ ## Policy + platform families only ("top two" panels), saved at reduced height.
+ (coefplot_third2_22_basecontrol_toptwo <- ggplot(filter(coefs_third2_22_basecontrol,layer1_hypothesisfamily %in% c("policy","platform")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
902
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
903
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
904
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
905
+ geom_text(data=filter(coefs_third2_22_basecontrol,layer1_hypothesisfamily=="policy"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
906
+ geom_hline(yintercept = 0,lty=2) +
907
+ facet_wrap(~family,ncol=1,scales="free") +
908
+ scale_x_continuous("",
909
+ breaks = coefs_third2_22_basecontrol$plotorder,labels = coefs_third2_22_basecontrol$outcome) +
910
+ scale_y_continuous("Treatment effect of conservative seed vs.\nliberal seed video, all 2/2 algorithm\n(95% and 90% CIs)") +
911
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
912
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
913
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
914
+ coord_flip() +
915
+ theme_bw(base_family = "sans") +
916
+ theme(strip.background = element_rect(fill="white"),legend.position="none")
917
+ )
918
+ ggsave(coefplot_third2_22_basecontrol_toptwo,
919
+ filename = "../results/coefplot_third2_22_basecontrol_3studies_toptwo.png",width=5,height=4.75)
920
+ ggsave(coefplot_third2_22_basecontrol_toptwo,
921
+ filename = "../results/coefplot_third2_22_basecontrol_3studies_toptwo.pdf",width=5,height=4.75)
922
+
923
+ ##### All respondents, attitudinal DV only #####
924
+ ## Stack the policy-family rows from all six contrast data frames, tagging each with its
+ ## contrast and respondent subset for a combined faceted plot.
+ ## NOTE(review): the subset factor levels omit "Moderates", so the two "Seed, *" rows get
+ ## NA subset -- presumably fine because the downstream plot filters to "Algorithm"
+ ## contrasts only, but confirm.
+ coefs_policyindex <- filter(coefs_third2_22_basecontrol,layer1_hypothesisfamily=="policy") %>% mutate(contrast="Seed, 2/2",subset="Moderates") %>%
925
+ bind_rows(filter(coefs_third2_31_basecontrol,layer1_hypothesisfamily=="policy") %>% mutate(contrast="Seed, 3/1",subset="Moderates")) %>%
926
+ bind_rows(filter(coefs_third2_pro_basecontrol,layer1_hypothesisfamily=="policy") %>% mutate(contrast="Algorithm, lib. seed",subset="Moderates (liberal seed)")) %>%
927
+ bind_rows(filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="policy") %>% mutate(contrast="Algorithm, cons. seed",subset="Moderates (conservative seed)")) %>%
928
+ bind_rows(filter(coefs_third1_basecontrol,layer1_hypothesisfamily=="policy") %>% mutate(contrast="Algorithm, lib. seed",subset="Liberals (liberal seed)")) %>%
929
+ bind_rows(filter(coefs_third3_basecontrol,layer1_hypothesisfamily=="policy") %>% mutate(contrast="Algorithm, cons. seed",subset="Conservatives (conservative seed)")) %>%
930
+ mutate(subset = factor(subset,levels=c("Liberals (liberal seed)","Conservatives (conservative seed)","Moderates (liberal seed)","Moderates (conservative seed)"),ordered = T))
931
+
932
+ (coefplot_policyindex_basecontrol <- ggplot(filter(coefs_policyindex,str_detect(contrast,"Algorithm")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
933
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
934
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
935
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=3) +
936
+ geom_text(data=filter(coefs_policyindex,subset=="Liberals (liberal seed)"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
937
+ geom_hline(yintercept = 0,lty=2) +
938
+ facet_wrap(~subset,ncol=2,scales="free") +
939
+ scale_x_continuous("",breaks = 8,labels="") +
940
+ scale_y_continuous("Treatment effect of more extreme 3/1 vs. 2/2\nalgorithm on policy index (95% and 90% CIs)") +
941
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
942
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
943
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
944
+ coord_flip(ylim=c(-0.11,0.11)) +
945
+ theme_bw(base_family = "sans") +
946
+ theme(strip.background = element_rect(fill="white"),legend.position="bottom",legend.margin = margin(0,0,0,-3,"lines"),
947
+ axis.ticks.y = element_blank())
948
+ )
949
+ ggsave(coefplot_policyindex_basecontrol,
950
+ filename = "../results/coefplot_policyindex_basecontrol_3studies.png",width=4.5,height=4.5)
951
+
952
+ (coefplot_policyindex_seed_basecontrol <- ggplot(filter(coefs_policyindex,str_detect(contrast,"Seed")),aes(x=plotorder,group=Sample,col=Sample,alpha=alpha)) +
953
+ geom_errorbar(aes(ymin=ci_lo_95,ymax=ci_hi_95,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=0.5) +
954
+ geom_errorbar(aes(ymin=ci_lo_90,ymax=ci_hi_90,col=Sample_color),position=position_dodge(width=0.5),width=0,lwd=1) +
955
+ geom_point(aes(y=est,shape=Sample,col=Sample_color),position=position_dodge(width=0.5),size=2) +
956
+ geom_text(data=filter(coefs_policyindex,contrast=="Seed, 2/2"),aes(y=est+0.006,label=Sample),alpha=1,position=position_dodge(width=0.5),size=3) +
957
+ geom_hline(yintercept = 0,lty=2) +
958
+ facet_wrap(~contrast,ncol=2,scales="free") +
959
+ scale_x_continuous("",breaks = 8,labels="") +
960
+ scale_y_continuous("Treatment effect of conservative vs. liberal\nseed on policy index (95% and 90% CIs)") +
961
+ scale_color_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)","insig"),values=c(vgreen,red_mit,blue_mit,"black")) +
962
+ scale_shape_manual("Study:",breaks = c("Gun Control\n(MTurk)","Minimum Wage\n(MTurk)","Minimum Wage\n(YouGov)"),values=c(16,17,18)) +
963
+ scale_alpha_manual(breaks=c(F,T),values=c(0.25,1)) +
964
+ coord_flip(ylim=c(-0.11,0.11)) +
965
+ theme_bw(base_family = "sans") +
966
+ theme(strip.background = element_rect(fill="white"),legend.position="none",
967
+ axis.ticks.y = element_blank())
968
+ )
969
+ ggsave(coefplot_policyindex_seed_basecontrol,
970
+ filename = "../results/coefplot_policyindex_seed_basecontrol_3studies.png",width=4.5,height=2.5)
971
+
972
+ rm(list = ls())
code/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Dean Knox
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
code/gun control (issue 1)/01_trt_assign.R ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cat(rep('=', 80),
2
+ '\n\n',
3
+ 'OUTPUT FROM: gun control (issue 1)/01_trt_assign.R',
4
+ '\n\n',
5
+ sep = ''
6
+ )
7
+
8
+ library(tidyverse)
9
+ library(janitor)
10
+ library(lubridate)
11
+ library(randomizr)
12
+
13
+ # create directory to hold cached intermediate files
14
+ dir.create("../results/intermediate data/gun control (issue 1)/",
15
+ recursive = TRUE, showWarnings = FALSE)
16
+
17
+ w1 <- read_csv("../data/gun control (issue 1)/wave1_final.csv")[-c(1,2),] %>%
18
+ clean_names() %>%
19
+ filter(finished == "True", q62 == "I agree to participate\u2028")
20
+
21
+ # Recodes ======================================================================
22
+
23
+ w1 <- w1 %>% mutate(start_date = as_datetime(start_date),
24
+ end_date = as_datetime(end_date),
25
+ survey_time = as.numeric(end_date-start_date))
26
+
27
+ print('wave 1 survey time:')
28
+ summary(w1$survey_time)
29
+
30
+ w1 <- w1 %>%
31
+ mutate(man = ifelse(q26 == "Man", 1, 0),
32
+ black = ifelse(str_detect(q29, "Black"), 1, 0),
33
+ white = ifelse(str_detect(q29, "White"), 1, 0),
34
+ college = ifelse(str_detect(q30, "college ") | str_detect(q30, "Post"), 1, 0),
35
+ income_gt50k = ifelse(q31 %in% names(table(w1$q31))[c(2,3,5,10:13)], 1, 0)
36
+ )
37
+
38
+ # PID:
39
+ w1$pid <- ifelse(w1$pid1=="Democrat",-1,NA)
40
+ w1$pid <- ifelse(w1$pid1=="Republican",1,w1$pid)
41
+ w1$pid[w1$pid4=="Closer to the Republican Party"] <- 1
42
+ w1$pid[w1$pid4=="Closer to the Democratic Party"] <- -1
43
+ w1$pid[w1$pid4=='Neither'] <- 0
44
+
45
+ print('wave 1 party id:')
46
+ round(table(w1$pid) / sum(table(w1$pid)), digits=2)
47
+
48
+ w1$ideo <- ifelse(w1$ideo1=="Liberal",-1,NA)
49
+ w1$ideo <- ifelse(w1$ideo1=="Conservative",1,w1$ideo)
50
+ w1$ideo[w1$ideo4=="Closer to liberals"] <- -1
51
+ w1$ideo[w1$ideo4=="Closer to conservatives"] <- 1
52
+ w1$ideo[w1$ideo4=="Neither"] <- 0
53
+
54
+ print('wave 1 ideology:')
55
+ round(table(w1$ideo) / sum(table(w1$ideo)), digits=2)
56
+
57
+ w1$age <- 2021 - as.numeric(w1$q27)
58
+
59
+
60
+
61
+ # A/V check ====================================================================
62
+
63
+ print("audio check:")
64
+ length(which(w1$q87 == "Quick and easy")) / length(w1$q87)
65
+
66
+ print("video check:")
67
+ length(which(w1$q89 == "wikiHow")) / length(w1$q89)
68
+
69
+ w1$audio_ok <- 1*(w1$q87 == "Quick and easy")
70
+ w1$video_ok <- 1*(w1$q89 == "wikiHow")
71
+
72
+ w1 <- w1 %>%
73
+ mutate(gun_own = dplyr::recode(q15, "Yes" = 1, "No" = 0))
74
+
75
+ # Convert pre-treatment DV to numeric unit scale -------------------------------
76
+
77
+ w1 <- w1 %>%
78
+ mutate( # higher = more pro-gun
79
+ right_to_own_importance = recode(q79, "Protect the right to own guns" = 1, "Regulate gun ownership" = 0),
80
+ assault_ban = (match(q81, names(table(q81))[c(5,3,1,2,4)])-1)/4,
81
+ handgun_ban = (match(q82, names(table(q82))[c(5,3,1,2,4)])-1)/4,
82
+ concealed_safe = 1-(match(q83, names(table(q83))[c(2,5,3,4,1)])-1)/4,
83
+ stricter_laws = (match(q23, names(table(q23))[c(5,3,1,2,4)])-1)/4
84
+ )
85
+
86
+ w1 <- w1 %>%
87
+ rowwise() %>%
88
+ mutate(gun_index = sum(c(right_to_own_importance,assault_ban,handgun_ban,concealed_safe,stricter_laws), na.rm=T),
89
+ gun_index_2 = mean(c(right_to_own_importance,assault_ban,handgun_ban,concealed_safe), na.rm=T))
90
+
91
+ # Cronbach's alpha -------------------------------------------------------------
92
+
93
+ index_fa <- psych::alpha(select(w1, right_to_own_importance, assault_ban, handgun_ban, concealed_safe, stricter_laws), check.keys = TRUE)
94
+ alpha <- index_fa$total["raw_alpha"]
95
+ writeLines(as.character(round(alpha,2)),
96
+ con = "../results/guncontrol_outcomes_alpha.tex",sep = "%")
97
+
98
+
99
+
100
+ # trim sample -------------------------------------------------------------
101
+
102
+ # We exclude respondents who took less than 120 seconds to complete the Wave 1 survey, failed either
103
+ # an audio check or a video check, as well as those whose gun policy opinions fall within the most
104
+ # extreme 5% of the gun policy index outcome (i.e. < 0.25 or > 4.75 on the 0-5 scale, to guard
105
+ # against eventual ceiling/floor effects; in a pilot study this was 15% of the sample).
106
+
107
+ w1 <- w1 %>% filter(audio_ok == 1, video_ok == 1)
108
+ w1 <- w1 %>% filter(survey_time >= 2)
109
+ w1 <- w1 %>% filter(gun_index >= 0.25, gun_index <= 4.75)
110
+
111
+ print('gun index:')
112
+ summary(w1$gun_index)
113
+
114
+
115
+
116
+ # Block random assignment ======================================================
117
+
118
+ # We randomly assign respondents to both a seed video type (pro-gun vs. anti-gun) and a recommendation system (3/1 vs. 2/2)
119
+ # blocking on Wave 1 gun policy opinions. In the sample of respondents
120
+ # who will be invited to Wave 2, we form terciles of the Wave 1 gun policy opinion index, referring
121
+ # to the lower, middle and upper terciles as anti-gun, moderate and pro-gun respectively
122
+
123
+ w1$tercile <- cut(w1$gun_index, breaks = quantile(w1$gun_index, c(0, 1/3, 2/3, 1)), include.lowest = TRUE, labels = 1:3)
124
+ tapply(w1$gun_index, w1$tercile, mean)
125
+ table(w1$tercile)
126
+
127
+ # pure control (with 1/5 probability), anti-gun 2/2 (with 2/5 probability), or anti-gun 3/1 (with 2/5 probability).
128
+ # seed position (pro-gun or anti-gun), recommendation system (2/2 or 3/1), or a
129
+ # pure control group (i.e. one of five possible conditions) with equal probability
130
+
131
+ set.seed(2021)
132
+
133
+ w1$trt_system <- block_ra(blocks = w1$tercile, prob_each = c(2/5, 2/5, 1/5), conditions = c("2/2", "3/1", "pure control"))
134
+
135
+ w1$seed <- rep("", nrow(w1))
136
+ w1[w1$tercile == 1,]$seed <- "anti-gun seed"
137
+ w1[w1$tercile == 3,]$seed <- "pro-gun seed"
138
+ w1[w1$tercile == 2,]$seed <- complete_ra(N = length(which(w1$tercile == 2)), prob = 0.5, conditions = c("pro-gun seed",
139
+ "anti-gun seed"))
140
+ with(w1[w1$tercile == 1,], round(prop.table(table(seed, trt_system)), digits = 3))
141
+ with(w1[w1$tercile == 2,], round(prop.table(table(seed, trt_system)), digits = 3))
142
+ with(w1[w1$tercile == 3,], round(prop.table(table(seed, trt_system)), digits = 3))
143
+
144
+ w1 <- w1 %>% mutate(trt_assign = case_when(seed == "anti-gun seed" & trt_system == "2/2" ~ 1,
145
+ seed == "anti-gun seed" & trt_system == "3/1" ~ 2,
146
+ seed == "pro-gun seed" & trt_system == "2/2" ~ 3,
147
+ seed == "pro-gun seed" & trt_system == "3/1" ~ 4,
148
+ trt_system == "pure control" ~ 5))
149
+
150
+ print('treatment assignment:')
151
+ table(w1$trt_assign)
152
+ print('seed assignment:')
153
+ table(w1$seed)
154
+ print('system assignment:')
155
+ table(w1$trt_system)
156
+ print('seed & system assignment:')
157
+ table(w1$trt_system, w1$seed)
158
+
159
+ w1$batch <- sample(c(rep(1:floor(nrow(w1)/500), 500), rep(6, nrow(w1)-500*5)))
160
+
161
+ # sent to Qualtrics ------------------------------------------------------------
162
+
163
+ # write_csv(data.frame(trt = w1$trt_assign, id = w1$worker_id, batch = w1$batch),
164
+ # "guncontrol_wave1_assignments.csv")
code/gun control (issue 1)/02_clean_merge.R ADDED
@@ -0,0 +1,560 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## YouTube Algorithms and Gun Control Opinions
2
+ ## Data collected June 2021 via MTurk/CloudResearch
3
+
4
+ cat(rep('=', 80),
5
+ '\n\n',
6
+ 'OUTPUT FROM: gun control (issue 1)/02_clean_merge.R',
7
+ '\n\n',
8
+ sep = ''
9
+ )
10
+
11
+ ## Preamble ----------------------------
12
+ library(tidyverse)
13
+ library(janitor)
14
+ library(lubridate)
15
+ library(stargazer)
16
+ library(broom)
17
+ library(corrplot)
18
+
19
+ a <- read_csv("../data/gun control (issue 1)/wave1_final.csv")[-c(1,2),] %>%
20
+ clean_names()
21
+
22
+ # Wave 1 =======================================================================
23
+
24
+ # Recodes:
25
+ a <- a %>% mutate(start_date = as_datetime(start_date),
26
+ end_date = as_datetime(end_date),
27
+ survey_time = as.numeric(end_date-start_date))
28
+
29
+ print('wave 1 survey time:')
30
+ summary(a$survey_time)
31
+
32
+ # Demographics -----------------------------------------------------------------
33
+
34
+ a <- a %>%
35
+ mutate(female = ifelse(q26 == "Woman", 1, 0),
36
+ male = ifelse(q26 == "Man", 1, 0),
37
+ black = ifelse(str_detect(q29, "Black"), 1, 0),
38
+ white = ifelse(str_detect(q29, "White"), 1, 0),
39
+ college = ifelse(str_detect(q30, "college ") | str_detect(q30, "Post"), 1, 0),
40
+ # dk: confirmed
41
+ income_gt50k = ifelse(q31 %in% names(table(a$q31))[c(2,3,5,10:13)], 1, 0)
42
+ )
43
+ a$income_gt50k[is.na(a$q31)] <- NA
44
+
45
+ # PID:
46
+
47
+ a <- a %>%
48
+ mutate(pid = case_when(pid1=="Democrat" ~ -1,
49
+ pid1=="Republican" ~ 1,
50
+ pid4=="Closer to the Republican Party" ~ 1,
51
+ pid4=="Closer to the Democratic Party" ~ -1,
52
+ pid4=="Neither" ~ 0))
53
+
54
+ a <- a %>%
55
+ mutate(ideo = case_when(ideo1=="Liberal" ~ -1,
56
+ ideo1=="Conservative" ~ 1,
57
+ ideo4=="Closer to conservatives" ~ 1,
58
+ ideo4=="Closer to liberals" ~ -1,
59
+ ideo4=="Neither" ~ 0))
60
+
61
+ a$age <- 2021 - as.numeric(a$q27)
62
+
63
+ # age categories: 18-29; 30-44; 45-64; 65+
64
+ a <- a %>%
65
+ mutate(age_cat = case_when(age>=18 & age<=29 ~ "18-29",
66
+ age>=30 & age<=44 ~ "30-44",
67
+ age>=45 & age<=64 ~ "45-64",
68
+ age>=65 ~ "65+"
69
+ ))
70
+ a <- a %>%
71
+ fastDummies::dummy_cols(select_columns = "age_cat")
72
+
73
+ ## Need:
74
+ # political interest (5-point scale: 1=Not at all interested, 5=Extremely interested),
75
+ # self-reported YouTube usage frequency (7-point scale: 0=None, 6=More than 3 hours per day),
76
+ # number of self-reported favorite YouTube channels (count coded from open-ended question: “Who/what are your favorite YouTube broadcasters or channels?”; 0 if blank),
77
+ # indicator for having watched videos from popular channels (1 if any selected: “In the past week, have you watched videos from any of the following YouTube broadcasters or channels?”),
78
+ # video vs. text preference (1=Always prefer videos, 10=Always prefer text),
79
+ # gun enthusiasm (additive index of “Do you or does anyone in your household own a gun?” with yes=1 and “How often, if ever, do you visit websites about guns, hunting or other shooting sports?” from 0=Never or Hardly ever to 1=Sometimes or Often),
80
+ # gun policy issue importance (4-point scale: 1=Not at all important, 4=Very important)
81
+
82
+ a <- a %>%
83
+ mutate(pol_interest = recode(q91,"Extremely interested"=5,"Very interested"=4,"Somewhat interested"=3,"Not very interested"=2,"Not at all interested"=1),
84
+ freq_youtube = recode(q77,"More than 3 hours per day"=6,"2–3 hours per day"=5,"1–2 hours per day"=4,"31–59 minutes per day"=3,"10–30 minutes per day"=2,"Less than 10 minutes per day"=1,"None"=0),
85
+ fav_channels = str_count(q8,"\n"), # should be one per line but this might not be right - need to hand-code
86
+ popular_channels = ifelse(is.na(q78),0,1),
87
+ vid_pref = recode(q9,"Always prefer videos\n1\n"=1,"2"=2,"3"=3,"4"=4,"5"=5,"6"=6,"7"=7,"8"=8,"9"=9,"Always prefer text\n10\n"=10),
88
+ visit_shooting_sites = recode(q18,"Never"=0,"Hardly ever"=0,"Sometimes"=1,"Often"=1),
89
+ gun_own = recode(q15, "Yes" = 1, "No" = 0),
90
+ gun_enthusiasm = visit_shooting_sites + gun_own,
91
+ gun_importance = recode(q76_8,"Very important"=4,"Somewhat important"=3,"Not too important"=2,"Not at all important"=1)
92
+ )
93
+
94
+ descr_data <- as.data.frame(select(a,
95
+ female,
96
+ white,
97
+ black,
98
+ age,
99
+ college,
100
+ income_gt50k))
101
+ descr_data <- descr_data %>% filter(rowSums(is.na(.)) != ncol(.))
102
+ descriptive_tab <- stargazer(descr_data,
103
+ summary = T, digits=2,
104
+ summary.stat=c("mean","sd","median","min","max","n"),
105
+ covariate.labels = c("Female",
106
+ "White",
107
+ "Black",
108
+ "Age",
109
+ "College educated",
110
+ "Income \\textgreater 50k"),
111
+ float = F,
112
+ out = "../results/guncontrol_descriptive.tex"
113
+ )
114
+
115
+ summary_tab <- a %>%
116
+ dplyr::summarize(female = mean(female,na.rm=T),
117
+ white = mean(white,na.rm=T),
118
+ black = mean(black,na.rm=T),
119
+ age1829 = mean(`age_cat_18-29`,na.rm=T),
120
+ age3044 = mean(`age_cat_30-44`,na.rm=T),
121
+ age4564 = mean(`age_cat_45-64`,na.rm=T),
122
+ age65p = mean(`age_cat_65+`,na.rm=T),
123
+ college = mean(college,na.rm=T),
124
+ income_gt50k = mean(income_gt50k,na.rm=T),
125
+ democrat = mean(pid==-1,na.rm=T),
126
+ republican = mean(pid==1,na.rm=T))
127
+
128
+ summary_tab <- pivot_longer(summary_tab,
129
+ cols=c(female,
130
+ white,
131
+ black,
132
+ age1829,
133
+ age3044,
134
+ age4564,
135
+ age65p,
136
+ college,
137
+ income_gt50k,
138
+ democrat,
139
+ republican),
140
+ names_to = "outcome",values_to = "survey_avg")
141
+ outcome_labels <- data.frame(outcome_pretty = c("Female",
142
+ "White",
143
+ "Black",
144
+ "Age 18-29",
145
+ "Age 30-44",
146
+ "Age 45-64",
147
+ "Age 65+",
148
+ "College educated",
149
+ "Income >$50k",
150
+ "Democrat",
151
+ "Republican"),
152
+ outcome = c("female",
153
+ "white",
154
+ "black",
155
+ "age1829",
156
+ "age3044",
157
+ "age4564",
158
+ "age65p",
159
+ "college",
160
+ "income_gt50k",
161
+ "democrat",
162
+ "republican"))
163
+ summary_tab$outcome_pretty <- outcome_labels$outcome_pretty[match(summary_tab$outcome,outcome_labels$outcome)]
164
+ summary_tab <- summary_tab %>%
165
+ mutate(outcome_pretty = factor(outcome_pretty,levels = c("Republican",
166
+ "Democrat",
167
+ "Income >$50k",
168
+ "College educated",
169
+ "Age 65+",
170
+ "Age 45-64",
171
+ "Age 30-44",
172
+ "Age 18-29",
173
+ "Female",
174
+ "Black",
175
+ "White"
176
+ ),ordered=T))
177
+
178
+ (descrip_fig <- ggplot(summary_tab) +
179
+ geom_point(aes(y=outcome_pretty,x=survey_avg)) +
180
+ geom_text(aes(y=outcome_pretty,x=survey_avg,label=paste0(round(100*survey_avg,0),"%")),nudge_x = 0.1) +
181
+ scale_y_discrete("") +
182
+ scale_x_continuous("",labels=scales::percent_format(),limits=c(0,1)) +
183
+ theme_bw()
184
+ )
185
+ ggsave(descrip_fig,filename = "../results/guncontrol_demographics.pdf",height=5,width=4)
186
+
187
+
188
+
189
+ #### Outcomes ####
190
+
191
+ # policy opinions, convert to numeric unit scale:
192
+ a <- a %>%
193
+ mutate( # higher = more pro-gun
194
+ right_to_own_importance = recode(q79, "Protect the right to own guns" = 1, "Regulate gun ownership" = 0),
195
+ assault_ban = recode(q81, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
196
+ handgun_ban = recode(q82, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
197
+ concealed_safe = recode(q83,"Much safer"=4,"Somewhat safer"=3,"No difference"=2,"Somewhat less safe"=1,"Much less safe"=0)/4,
198
+ stricter_laws = recode(q23, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4
199
+ )
200
+
201
+ a <- a %>%
202
+ rowwise() %>%
203
+ mutate(gun_index = mean(c(right_to_own_importance,assault_ban,handgun_ban,concealed_safe,stricter_laws), na.rm=T),
204
+ gun_index_2 = mean(c(right_to_own_importance,assault_ban,handgun_ban,concealed_safe), na.rm=T)) %>%
205
+ ungroup()
206
+
207
+ # Cronbach's alpha
208
+ index_fa <- psych::alpha(select(a, right_to_own_importance, assault_ban, handgun_ban, concealed_safe, stricter_laws), check.keys = TRUE)
209
+ write.csv(data.frame(cor(select(a, right_to_own_importance, assault_ban, handgun_ban, concealed_safe, stricter_laws), use = "complete.obs")),row.names = T,
210
+ file = "../results/guncontrol_cormat_gun_index_w1.csv")
211
+ pdf("../results/guncontrol_corrplot_gunindex_w1.pdf")
212
+ w1_corrplot <- corrplot::corrplot(cor(select(a, right_to_own_importance, assault_ban, handgun_ban, concealed_safe, stricter_laws), use = "complete.obs"),method = "shade")
213
+ dev.off()
214
+
215
+ alpha <- index_fa$total["raw_alpha"]
216
+ writeLines(as.character(round(alpha,2)),con = "../results/guncontrol_outcomes_alpha.tex",sep = "%")
217
+
218
+ # FACTOR ANALYSIS WITH VARIMAX ROTATION (PRE)
219
+ pca2 <- psych::principal(select(a, right_to_own_importance, assault_ban, handgun_ban, concealed_safe, stricter_laws),
220
+ rotate="varimax",
221
+ nfactors=1
222
+ )
223
+ pc2 <- pca2$Vaccounted[2]
224
+ writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study1_pre.tex",sep = "%")
225
+
226
+ # media trust
227
+ a <- a %>%
228
+ mutate( # higher = more trusting
229
+ trust_majornews = recode(q58_1,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
230
+ trust_localnews = recode(q58_2,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
231
+ trust_social = recode(q58_3,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
232
+ trust_youtube = recode(q58_4,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
233
+ fabricate_majornews = recode(q89_1,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4,
234
+ fabricate_youtube = recode(q90,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4
235
+ ) %>%
236
+ rowwise() %>%
237
+ mutate(media_trust = mean(trust_majornews,trust_localnews,fabricate_majornews,na.rm=T)) %>%
238
+ ungroup()
239
+
240
+ media_trust_fa <- psych::alpha(select(a, trust_majornews,trust_localnews,fabricate_majornews), check.keys = TRUE)
241
+
242
+ print('media trust alpha:')
243
+ media_trust_fa$total["raw_alpha"]
244
+
245
+
246
+ # affective polarization
247
+ print('check affpol feeling thermometers:')
248
+ a %>%
249
+ group_by(pid) %>%
250
+ summarize(mean_2=mean(as.numeric(q5_2),na.rm=T),
251
+ mean_5=mean(as.numeric(q5_5),na.rm=T),
252
+ mean_11=mean(as.numeric(q5_11),na.rm=T),
253
+ mean_12=mean(as.numeric(q5_12),na.rm=T))
254
+
255
+ a <- a %>%
256
+ mutate(
257
+ smart_dems = recode(q61, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
258
+ smart_reps = recode(q62_1, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
259
+ comfort_dems = recode(q87_1,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
260
+ comfort_reps = recode(q88,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
261
+ ft_dems = as.numeric(q5_11),
262
+ ft_reps = as.numeric(q5_12),
263
+ affpol_smart = case_when(
264
+ pid==-1 ~ smart_dems-smart_reps,
265
+ pid==1 ~ smart_reps-smart_dems
266
+ ),
267
+ affpol_comfort = case_when(
268
+ pid==-1 ~ comfort_dems-comfort_reps,
269
+ pid==1 ~ comfort_reps-comfort_dems
270
+ ),
271
+ affpol_ft = case_when(
272
+ pid==-1 ~ ft_dems-ft_reps,
273
+ pid==1 ~ ft_reps-ft_dems
274
+ )
275
+ )
276
+
277
+
278
+
279
+ ## for reinvitations:
280
+ w1_reinvited <- a %>% filter(q87 == "Quick and easy", q89 == "wikiHow") # AV checks
281
+ w1_reinvited <- w1_reinvited %>% filter(survey_time >= 2)
282
+ w1_reinvited <- w1_reinvited %>% filter(gun_index >= 0.05, gun_index <= 0.95)
283
+
284
+
285
+ w1_reinvited$thirds <- cut(w1_reinvited$gun_index, breaks = quantile(w1_reinvited$gun_index, c(0, 1/3, 2/3, 1)), include.lowest = TRUE, labels = 1:3)
286
+ a$thirds <- w1_reinvited$thirds[match(a$worker_id,w1_reinvited$worker_id)]
287
+
288
+ write_csv(a, "../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w1_clean.csv")
289
+
290
+
291
+ # Wave 2 (main survey) =========================================================
292
+
293
+ w2 <- read_csv("../data/gun control (issue 1)/wave2_final.csv")[-c(1,2),] %>%
294
+ clean_names() %>%
295
+ select(-thirds) # remove all-NA column
296
+
297
+ w2 <- w2 %>% mutate(start_date_w2 = as_datetime(start_date),
298
+ end_date_w2 = as_datetime(end_date),
299
+ survey_time_w2 = as.numeric(end_date_w2-start_date_w2))
300
+
301
+ print('wave 2 survey time:')
302
+ summary(w2$survey_time_w2)
303
+
304
+ print('audio ok:')
305
+ length(which(w2$q81 == "Quick and easy"))/length(w2$q81)
306
+ print('video ok:')
307
+ length(which(w2$q82 == "wikiHow"))/length(w2$q82)
308
+
309
+
310
+ #### Outcomes ####
311
+
312
+ ##### policy opinions ######
313
+ # convert to numeric unit scale:
314
+ w2 <- w2 %>%
315
+ mutate( # higher = more pro-gun
316
+ right_to_own_importance_w2 = recode(q19, "Protect the right to own guns" = 1, "Regulate gun ownership" = 0),
317
+ assault_ban_w2 = recode(q20, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
318
+ handgun_ban_w2 = recode(q21, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
319
+ concealed_safe_w2 = recode(q22,"Much safer"=4,"Somewhat safer"=3,"No difference"=2,"Somewhat less safe"=1,"Much less safe"=0)/4,
320
+ stricter_laws_w2 = recode(q23, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4
321
+ )
322
+
323
+ # Cronbach's alpha
324
+ index_fa <- psych::alpha(select(w2, right_to_own_importance_w2, assault_ban_w2, handgun_ban_w2, concealed_safe_w2, stricter_laws_w2), check.keys = T)
325
+ write.csv(data.frame(cor(select(w2, right_to_own_importance_w2, assault_ban_w2, handgun_ban_w2, concealed_safe_w2, stricter_laws_w2), use = "complete.obs")),row.names = T,
326
+ file = "../results/guncontrol_cormat_gun_index_w2.csv")
327
+ pdf("../results/guncontrol_cormat_gun_index_w2.pdf")
328
+ w2_corrplot <- corrplot::corrplot(cor(select(w2, right_to_own_importance_w2, assault_ban_w2, handgun_ban_w2, concealed_safe_w2, stricter_laws_w2), use = "complete.obs"),method = "shade")
329
+ dev.off()
330
+
331
+ print('wave 2 policy opinion alpha:')
332
+ alpha <- index_fa$total["raw_alpha"]
333
+ print(alpha)
334
+
335
+ # FACTOR ANALYSIS WITH VARIMAX ROTATION (POST)
336
+ pca2 <- psych::principal(select(w2, right_to_own_importance_w2, assault_ban_w2, handgun_ban_w2, concealed_safe_w2, stricter_laws_w2),
337
+ rotate="varimax",
338
+ nfactors=1
339
+ )
340
+ pc2 <- pca2$Vaccounted[2]
341
+ writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study1_post.tex",sep = "%")
342
+
343
+ w2 <- w2 %>%
344
+ rowwise() %>%
345
+ mutate(gun_index_w2 = mean(c(right_to_own_importance_w2,assault_ban_w2,handgun_ban_w2,concealed_safe_w2,stricter_laws_w2), na.rm=T),
346
+ gun_index_2_w2 = mean(c(right_to_own_importance_w2,assault_ban_w2,handgun_ban_w2,concealed_safe_w2), na.rm=T))
347
+
348
+
349
+ # media trust
350
+ w2 <- w2 %>%
351
+ mutate( # higher = more trusting
352
+ trust_majornews = recode(q96_1,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
353
+ trust_localnews = recode(q96_2,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
354
+ trust_social = recode(q96_3,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
355
+ trust_youtube = recode(q96_4,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
356
+ fabricate_majornews = recode(q98,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4,
357
+ fabricate_youtube = recode(q100_1,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4
358
+ ) %>%
359
+ rowwise() %>%
360
+ mutate(media_trust = mean(trust_majornews,trust_localnews,fabricate_majornews,na.rm=T)) %>%
361
+ ungroup()
362
+
363
+ ##### affective polarization #####
364
+ # check FTs:
365
+ w2 <- w2 %>%
366
+ mutate(
367
+ smart_dems = recode(q61, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
368
+ smart_reps = recode(q62_1, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
369
+ comfort_dems = recode(q92,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
370
+ comfort_reps = recode(q94,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
371
+ ft_dems = as.numeric(q90_11),
372
+ ft_reps = as.numeric(q90_12)
373
+ )
374
+
375
+
376
+ write_csv(w2, "../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w2_clean.csv")
377
+
378
+
379
+ # join to W1 by MT worker ID:
380
+ w12 <- left_join(a, filter(w2,!is.na(worker_id)), by = "worker_id",suffix=c("_w1","_w2"))
381
+ names(w12)
382
+
383
+ w12 <- w12 %>%
384
+ mutate(
385
+ affpol_smart_w2 = case_when(
386
+ pid==-1 ~ smart_dems_w2-smart_reps_w2,
387
+ pid==1 ~ smart_reps_w2-smart_dems_w2
388
+ ),
389
+ affpol_comfort_w2 = case_when(
390
+ pid==-1 ~ comfort_dems_w2-comfort_reps_w2,
391
+ pid==1 ~ comfort_reps_w2-comfort_dems_w2
392
+ ),
393
+ affpol_ft_w2 = case_when(
394
+ pid==-1 ~ ft_dems_w2-ft_reps_w2,
395
+ pid==1 ~ ft_reps_w2-ft_dems_w2
396
+ ))
397
+
398
+ write_csv(w12, "../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w12_clean.csv")
399
+
400
+
401
# Wave 3 (post survey) =========================================================

# Load the raw Qualtrics export; rows 1-2 are Qualtrics' extra header rows
# (question text / import metadata), so drop them before cleaning names.
w3 <- read_csv("../data/gun control (issue 1)/wave3_final.csv")[-c(1,2),] %>%
  clean_names()

# survey duration in the default difftime unit of end - start
# (NOTE(review): as.numeric() on a difftime uses whatever unit difftime
# chose -- verify the unit before interpreting the summary below)
w3 <- w3 %>% mutate(start_date_w3 = as_datetime(start_date),
                    end_date_w3 = as_datetime(end_date),
                    survey_time_w3 = as.numeric(end_date_w3-start_date_w3))

print('wave 3 survey time:')
summary(w3$survey_time_w3)
412
+
413
+
414
#### Outcomes ####

# policy opinions, convert to numeric unit scale:
w3 <- w3 %>%
  mutate( # higher = more pro-gun
    right_to_own_importance_w3 = recode(q79, "Protect the right to own guns" = 1, "Regulate gun ownership" = 0),
    assault_ban_w3 = recode(q81, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    handgun_ban_w3 = recode(q82, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    concealed_safe_w3 = recode(q83,"Much safer"=4,"Somewhat safer"=3,"No difference"=2,"Somewhat less safe"=1,"Much less safe"=0)/4,
    stricter_laws_w3 = recode(q23, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4
  )
# write the item correlation matrix for the wave-3 gun index ...
write.csv(data.frame(cor(select(w3, right_to_own_importance_w3, assault_ban_w3, handgun_ban_w3, concealed_safe_w3, stricter_laws_w3), use = "complete.obs")),row.names = T,
          file = "../results/guncontrol_cormat_gun_index_w3.csv")
# ... and a corresponding correlation heatmap
pdf("../results/guncontrol_corrplot_gunindex_w3.pdf")
corrplot(cor(select(w3, right_to_own_importance_w3, assault_ban_w3, handgun_ban_w3, concealed_safe_w3, stricter_laws_w3), use = "complete.obs"),method = "shade")
dev.off()

# Row-wise means across items (rowwise() + c() makes mean() average the
# items within each respondent); gun_index_2_w3 excludes stricter_laws_w3.
w3 <- w3 %>%
  rowwise() %>%
  mutate(gun_index_w3 = mean(c(right_to_own_importance_w3,assault_ban_w3,handgun_ban_w3,concealed_safe_w3,stricter_laws_w3), na.rm=T),
         gun_index_2_w3 = mean(c(right_to_own_importance_w3,assault_ban_w3,handgun_ban_w3,concealed_safe_w3), na.rm=T))
435
+
436
##### media trust #####
# Recode wave-3 media-trust items to the unit interval and build the
# three-item media-trust index (higher = more trusting).
w3 <- w3 %>%
  mutate( # higher = more trusting
    trust_majornews_w3 = recode(q88_1,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_localnews_w3 = recode(q88_2,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_social_w3 = recode(q88_3,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_youtube_w3 = recode(q88_4,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    fabricate_majornews_w3 = recode(q90,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4,
    fabricate_youtube_w3 = recode(q92,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4
  ) %>%
  rowwise() %>%
  # BUG FIX: mean() only averages its FIRST argument in R -- additional
  # vectors are passed to `...` and ignored by the default method -- so
  # media_trust_w3 previously just equaled trust_majornews_w3. Wrapping the
  # items in c() (as done for gun_index_w3 above) averages all three.
  mutate(media_trust_w3 = mean(c(trust_majornews_w3,trust_localnews_w3,fabricate_majornews_w3),na.rm=T)) %>%
  ungroup()
449
+
450
+
451
# affective polarization
# Wave-3 versions of the partisan-affect items, recoded to the unit interval.
# NOTE(review): the comfort/FT question ids differ from wave 2 (q94/q96 and
# q5_11/q5_12 here vs q92/q94 and q90_11/q90_12 in w2) -- confirm against
# the wave-3 instrument that these map to the same constructs.
w3 <- w3 %>%
  mutate(
    smart_dems_w3 = recode(q61, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
    smart_reps_w3 = recode(q62_1, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
    comfort_dems_w3 = recode(q94,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
    comfort_reps_w3 = recode(q96,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
    ft_dems_w3 = as.numeric(q5_11),
    ft_reps_w3 = as.numeric(q5_12)
  )

# persist cleaned wave-3 data
write_csv(w3, "../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_mturk_w3_clean.csv")
464
+
465
# Join wave 3 onto the merged wave-1/2 frame by MTurk worker id; wave-3
# columns that collide with existing names get the "_w3" suffix (existing
# columns keep their names via the empty first suffix).
w123 <- left_join(w12, filter(w3,!is.na(worker_id)), by = "worker_id",suffix=c("","_w3"))
names(w123)

# Wave-3 affective polarization: in-party minus out-party, using wave-1 pid
# (presumably -1 = Democrat, 1 = Republican; other pid values -> NA).
w123 <- w123 %>%
  mutate(
    affpol_smart_w3 = case_when(
      pid==-1 ~ smart_dems_w3-smart_reps_w3,
      pid==1 ~ smart_reps_w3-smart_dems_w3
    ),
    affpol_comfort_w3 = case_when(
      pid==-1 ~ comfort_dems_w3-comfort_reps_w3,
      pid==1 ~ comfort_reps_w3-comfort_dems_w3
    ),
    affpol_ft_w3 = case_when(
      pid==-1 ~ ft_dems_w3-ft_reps_w3,
      pid==1 ~ ft_reps_w3-ft_dems_w3
    ))
482
+
483
+
484
+
485
+
486
## YTRecs session data: -------------------------------------------------------

# Platform interaction logs for the wave-2 video task. Sessions are keyed by
# (topic_id, urlid); keep only rows with a well-formed 5-character urlid and
# a non-missing pro-video count.
ytrecs <- read_rds("../data/gun control (issue 1)/Wave2_video_June_2021_interactions.rds") %>%
  clean_names() %>%
  as_tibble() %>%
  mutate(duration = end_time2 - start_time2) %>% # have to recalculate this
  select(topic_id,urlid,pro,anti,duration,pro_up,pro_down,anti_up,anti_down,pro_save,anti_save,start_time2, end_time2) %>%
  filter(nchar(urlid)==5 & !is.na(pro))

# De-duplicate repeated sessions for the same (topic_id, urlid), keeping the
# longest one. NOTE(review): if two duplicate sessions tie on duration, both
# rows survive this filter -- the later slice(1) per worker resolves that.
ytrecs <- ytrecs %>%
  group_by(topic_id,urlid) %>%
  mutate(dupes = n(),
         max_duration = ifelse(duration==max(duration),1,0)
         ) %>%
  filter(max_duration==1) # using longest session as valid one
501
+
502
# Per-session interaction tallies. Missing counts mean "no such interaction",
# so replace NA with 0 first; rowwise() makes each sum() operate within a row.
ytrecs <- ytrecs %>%
  rowwise() %>%
  mutate(
    pro_up = replace_na(pro_up,0),
    pro_down = replace_na(pro_down,0),
    anti_up = replace_na(anti_up,0),
    anti_down = replace_na(anti_down,0),
    pro_save = replace_na(pro_save,0),
    anti_save = replace_na(anti_save,0),

    # aggregate engagement measures across pro- and anti-gun videos
    total_likes = sum(pro_up,anti_up,na.rm=T),
    total_dislikes = sum(pro_down,anti_down,na.rm=T),
    total_thumbs = sum(pro_up,pro_down,anti_up,anti_down,na.rm=T),
    total_saved = sum(pro_save,anti_save,na.rm=T),
    total_interactions = sum(pro_up,pro_down,anti_up,anti_down,pro_save,anti_save,na.rm=T),
    # net positive engagement: likes + saves - dislikes
    positive_interactions = total_likes + total_saved - total_dislikes
  )
519
+
520
## Extract the seed-video stance from the topic id suffix ("p" = pro seed,
## "a" = anti seed) and compute the fraction of freely chosen videos that
## were pro-gun, excluding the assigned seed video from the counts.
ytrecs <- ytrecs %>%
  # BUG FIX: the character class "[p,a]" also matched a literal comma;
  # "[pa]" matches only the intended p/a suffix. (On well-formed topic ids
  # ending in "_p"/"_a" the result is unchanged.)
  mutate(seed = str_replace(topic_id,".*_([pa])$","\\1")) %>%
  mutate(pro_fraction_chosen = case_when(
    seed=="a" ~ pro/(pro+anti-1),      # anti seed: remove it from the denominator only
    seed=="p" ~ (pro-1)/(pro+anti-1)   # pro seed: remove it from numerator and denominator
  ))
# adjust for zeros: no videos watched at all -> fraction undefined
ytrecs$pro_fraction_chosen[ytrecs$pro==0 & ytrecs$anti==0] <- NA
528
+
529
+
530
# Parse the YTRecs session keys (topic_id, urlid) out of the wave-2 video
# link so survey rows can be joined to the platform logs.
# NOTE(review): the topic_id regex uses greedy (.*) while the urlid regex
# uses lazy (.*?) for the same capture position -- confirm both extract the
# intended substrings on real links with multiple '&' parameters.
w123 <- w123 %>%
  ungroup() %>%
  mutate(topic_id = str_replace(video_link_w2,".*&topicid=(.*)&allowDupe=1&id=(.*)$","\\1"),
         urlid = str_replace(video_link_w2,".*&topicid=(.*?)&allowDupe=1&id=(.*)$","\\2"),
         )


# attach platform session data to the survey data
w123 <- left_join(w123,ytrecs,by=c("topic_id","urlid"))

# If a worker still has multiple matched sessions, keep the earliest-starting
# one so each worker contributes exactly one row.
w123 <- w123 %>%
  arrange(worker_id, start_time2) %>%
  group_by(worker_id) %>%
  slice(1) %>% # Keep first resp
  ungroup()

# NOTE(review): the label says "ISSUE 2" but this script lives under
# "gun control (issue 1)" -- confirm which issue these counts describe.
print("ISSUE 2 NUMBERS (MTURK):")
print(paste('count w/ valid ytrecs data:', sum(!is.na(w123$pro))))
print(paste('count w/ valid ytrecs interactions:', sum(!is.na(w123$total_thumbs))))
print('interactions:')
summary(w123$total_interactions)

# create numeric dosage version of treatment:
# 31 recommender arms -> dose 1, 22 arms -> dose 0, pure control -> NA;
# treatment_seed strips the trailing "_NN" to recover "pro"/"anti"
# ("control" has no such suffix and passes through unchanged).
w123 <- w123 %>%
  mutate(treatment_dose = recode(treatment_arm,
                                 "anti_31"= 1, "anti_22" = 0,
                                 "pro_31"= 1, "pro_22" = 0,
                                 "control"=NA_real_),
         treatment_seed = str_replace(treatment_arm,"(.*)\\_\\d{2}","\\1")
         )

# final merged analysis file consumed by the analysis scripts
write_csv(w123, "../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w123_clean.csv")
code/gun control (issue 1)/03_analysis_multipletesting.R ADDED
@@ -0,0 +1,1293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Banner so this script's output is identifiable in a combined run log.
cat(rep('=', 80),
    '\n\n',
    'OUTPUT FROM: gun control (issue 1)/03_analysis_multipletesting.R',
    '\n\n',
    sep = ''
    )

library(data.table)
library(car)        # linearHypothesis + multivariate test internals
library(sandwich)   # vcovHC robust covariance estimators
library(lmtest)
library(ggplot2)
library(assertthat)
library(foreach)
library(doParallel)
# register a parallel backend for any %dopar% loops below, leaving one core free
registerDoParallel(cores = detectCores() - 1)
17
+
18
+
19
+
20
###############
## functions ##
###############

## infix string-concatenation shorthand: 'a' %.% 'b' is paste0('a', 'b')
`%.%` <- paste0
25
+
26
## Simes (1986) combined p-value for a family of hypotheses:
##   p_Simes = min_i ( n * p_(i) / i )  over the sorted p-values p_(1..n).
## BUG FIX: the original computed length(ps) * ps / rank(ps); rank()'s default
## ties.method = 'average' yields fractional ranks under tied p-values
## (e.g. c(.02, .02) gave 0.0267 instead of the correct 0.02), and the sort()
## wrapped around min() was redundant. Sorting the p-values and dividing by
## the index 1..n implements the formula exactly (identical when no ties).
simes <- function(ps){
  min(length(ps) * sort(ps) / seq_along(ps))
}
29
+
30
### functions to handle inconsistent interaction ordering of mlm() ###

## Canonicalize interaction labels: strip an optional regex prefix, then sort
## the factor names within each ':'-separated term, so that 'b1:a1' and
## 'a1:b1' map to the same canonical string and can be compared directly.
reorder.interaction.names <- function(x, prefix = ''){
  stripped <- gsub(paste0('^', prefix), '', x)
  term.parts <- strsplit(stripped, ':')
  sapply(term.parts,
         function(parts) paste(sort(parts), collapse = ':')
         )
}
40
+
41
## take term of form 'a1:b1', look up in vector of form 'b#:a#', return 'b1:a1'
## (both sides are canonicalized first, so within-term factor order and an
## optional prefix on y are ignored during matching; unmatched terms -> NA)
convert.interaction.names <- function(x, y, prefix.y = ''){
  canonical.x <- reorder.interaction.names(x)
  canonical.y <- reorder.interaction.names(y, prefix = prefix.y)
  y[match(canonical.x, canonical.y)]
}
48
+
49
## modified from print.linearHypothesis.mlm to use alternate df & return pvals
## (print method is responsible for doing the actual computation of pvals)
##
## Recomputes the multivariate test statistics (Pillai / Wilks / HL / Roy)
## from a car::linearHypothesis() mlm result `x`, optionally overriding the
## residual df, and returns them as an anova-classed data.frame that includes
## the 'Pr(>F)' column the caller reads. Relies on unexported car:::Pillai,
## car:::Wilks, car:::HL, car:::Roy helpers, so it is version-coupled to car.
extract.lht <- function(x,
                        SSP = TRUE,           # print hypothesis SSP matrix
                        SSPE = SSP,           # print error SSP matrix
                        digits = getOption('digits'),
                        df.residual = x$df.residual  # override residual df here
                        ){
  test <- x$test
  # optional diagnostic printing, mirroring car's print method
  if (!is.null(x$P) && SSP) {
    P <- x$P
    cat("\n Response transformation matrix:\n")
    attr(P, "assign") <- NULL
    attr(P, "contrasts") <- NULL
    print(P, digits = digits)
  }
  if (SSP) {
    cat("\nSum of squares and products for the hypothesis:\n")
    print(x$SSPH, digits = digits)
  }
  if (SSPE) {
    cat("\nSum of squares and products for error:\n")
    print(x$SSPE, digits = digits)
  }
  # singular error SSP: no multivariate tests possible; return input invisibly
  if ((!is.null(x$singular)) && x$singular) {
    warning("the error SSP matrix is singular; multivariate tests are unavailable")
    return(invisible(x))
  }
  # eigenvalues of SSPE^-1 %*% SSPH drive all four test statistics
  SSPE.qr <- qr(x$SSPE)
  eigs <- Re(eigen(qr.coef(SSPE.qr, x$SSPH), symmetric = FALSE)$values)
  tests <- matrix(NA, 4, 4)
  rownames(tests) <- c("Pillai", "Wilks", "Hotelling-Lawley",
                       "Roy")
  if ("Pillai" %in% test)
    tests[1, 1:4] <- car:::Pillai(eigs, x$df, df.residual)
  if ("Wilks" %in% test)
    tests[2, 1:4] <- car:::Wilks(eigs, x$df, df.residual)
  if ("Hotelling-Lawley" %in% test)
    tests[3, 1:4] <- car:::HL(eigs, x$df, df.residual)
  if ("Roy" %in% test)
    tests[4, 1:4] <- car:::Roy(eigs, x$df, df.residual)
  tests <- na.omit(tests)
  # guard against invalid F approximations (negative F or nonpositive dfs)
  ok <- tests[, 2] >= 0 & tests[, 3] > 0 & tests[, 4] > 0
  ok <- !is.na(ok) & ok
  # NOTE(review): as in car's own print method, pf() is evaluated on the
  # ok-subset while cbind targets all remaining rows -- fine when every
  # requested test is valid, which is the case for the single-test calls here.
  tests <- cbind(x$df, tests, pf(tests[ok, 2], tests[ok, 3],
                                 tests[ok, 4], lower.tail = FALSE))
  colnames(tests) <- c("Df", "test stat", "approx F", "num Df",
                       "den Df", "Pr(>F)")
  tests <- structure(as.data.frame(tests),
                     heading = paste("\nMultivariate Test",
                                     if (nrow(tests) > 1)
                                       "s", ": ", x$title, sep = ""),
                     class = c("anova",
                               "data.frame"
                               )
                     )
  return(tests)
}
107
+
108
+
109
+
110
###############
## load data ##
###############

# merged wave-1/2/3 analysis file produced by the upstream cleaning script
d <- fread('../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w123_clean.csv')

## drop pure control
d <- d[treatment_arm != 'control',]

## drop NA video counts (respondents with no matched platform session)
d <- d[!is.na(pro) & !is.na(anti),]
121
+
122
+
123
+
124
##############
## controls ##
##############

# Pre-treatment (wave-1) covariates, grouped by the hypothesis family whose
# regressions they enter. These names refer to raw columns of `d` and are
# remapped to dummy/demeaned column names by the transformation loop below.

platform.controls <- c('age_cat',
                       'male',
                       'pol_interest',
                       'freq_youtube',
                       'fav_channels',
                       'popular_channels',
                       'vid_pref',
                       'gun_enthusiasm',
                       'gun_importance'
                       )

gunpolicy.controls <- 'gun_index'

media.controls <- c('trust_majornews_w1',
                    'trust_youtube_w1',
                    'fabricate_majornews_w1',
                    'fabricate_youtube_w1'
                    )

affpol.controls <- c('affpol_ft',
                     'affpol_smart',
                     'affpol_comfort'
                     )

# union of all controls; unique() guards against a variable being listed
# in more than one family
controls.raw <- unique(c(platform.controls,
                         gunpolicy.controls,
                         media.controls,
                         affpol.controls
                         )
                       )
158
+
159
## transform control variables by creating dummies and demeaning
controls.trans <- list()
for (j in controls.raw){
  ## convert to dummies if needed: model.matrix expands factor/character
  ## controls into 0/1 columns; na.action = 'na.pass' keeps rows with
  ## missing control values instead of silently dropping them
  controls.j <- model.matrix(as.formula('~ 0 + ' %.% j),
                             model.frame(as.formula('~ 0 + ' %.% j),
                                         data = d,
                                         na.action = 'na.pass'
                                         )
                             )
  ## demean by column
  ## (FIX: removed a trailing empty argument after FUN = `-`, which R was
  ## silently matching to sweep()'s check.margin formal)
  controls.j <- sweep(controls.j,
                      MARGIN = 2,
                      STATS = colMeans(controls.j, na.rm = TRUE),
                      FUN = `-`
                      )
  ## make dummy column names syntactically valid for use in formulas
  colnames(controls.j) <- make.names(colnames(controls.j))
  ## remove control from original data
  d[[j]] <- NULL
  ## reinsert transformed control
  d <- cbind(d, controls.j)
  ## keep track of which original controls map to which transformed controls
  controls.trans[[j]] <- colnames(controls.j)
}

## map original control variable names to their transformed column names
platform.controls <- unlist(controls.trans[platform.controls])
gunpolicy.controls <- unlist(controls.trans[gunpolicy.controls])
media.controls <- unlist(controls.trans[media.controls])
affpol.controls <- unlist(controls.trans[affpol.controls])
189
+
190
+
191
+
192
##############
## outcomes ##
##############

### hypothesis family 1: platform interactions ###

## platform interaction time: compute winsorized usage time
## (clipped at the 2.5th and 97.5th percentiles, i.e. 95% winsorization)
## FIX: corrected 'windsorized' -> 'winsorized' in the warning text
warning('diverges from pap, 95% winsorized due to extreme outliers')
d[, platform_duration := duration]
d[platform_duration <= quantile(d$duration, .025),
  platform_duration := quantile(d$duration, .025)
  ]
d[platform_duration >= quantile(d$duration, .975),
  platform_duration := quantile(d$duration, .975)
  ]
## all platform interaction outcomes
platform.outcomes <- c('pro_fraction_chosen',
                       'positive_interactions', # positive - negative (dislike)
                       'platform_duration'
                       )
212
+
213
+
214
+
215
### hypothesis family 2: gun policy attitudes ###

## only one preregistered outcome in this family
gunpolicy.outcomes <- 'gun_index_w2'
## added 4 jun 2024 by reviewer request
gunpolicy.outcomes.understanding <- c('right_to_own_importance_w2',
                                      'concealed_safe_w2'
                                      )



### hypothesis family 3: media trust ###
media.outcomes <- c('trust_majornews_w2',
                    'trust_youtube_w2',
                    'fabricate_majornews_w2',
                    'fabricate_youtube_w2'
                    )



### hypothesis family 4: affective polarization ###
affpol.outcomes <- c('affpol_ft_w2',
                     'affpol_smart_w2',
                     'affpol_comfort_w2'
                     )

# union of all outcomes across families
# NOTE(review): the global `outcomes` is later shadowed inside the layer-3
# initialization loop (outcomes <- get(family %.% '.outcomes')); harmless as
# written, but rename one of them if this combined vector is needed later.
outcomes <- unique(c(platform.outcomes,
                     gunpolicy.outcomes,
                     media.outcomes,
                     affpol.outcomes
                     )
                   )
247
+
248
+
249
+
250
################
## treatments ##
################

## create attitude dummies
# `thirds` indexes the respondent's prior-attitude tercile (values 1/2/3);
# presumably 1 = anti, 2 = neutral, 3 = pro -- confirm upstream coding
d[, attitude := c('anti', 'neutral', 'pro')[thirds]]
d[, attitude.anti := as.numeric(attitude == 'anti')]
d[, attitude.neutral := as.numeric(attitude == 'neutral')]
d[, attitude.pro := as.numeric(attitude == 'pro')]

## create seed dummies (stance of the assigned seed video)
d[, seed.anti := as.numeric(treatment_seed == 'anti')]
d[, seed.pro := as.numeric(treatment_seed == 'pro')]

## create recsys dummies (recommender mix arm: 22 vs 31)
d[, recsys.22 := as.numeric(treatment_arm %like% '22')]
d[, recsys.31 := as.numeric(treatment_arm %like% '31')]

## manually define coefficients to estimate
# one saturated cell per attitude x recsys combination, with neutral-attitude
# cells further split by seed stance
treatments <- c('attitude.anti:recsys.22',
                'attitude.anti:recsys.31',
                'attitude.neutral:seed.anti:recsys.22',
                'attitude.neutral:seed.pro:recsys.22',
                'attitude.neutral:seed.anti:recsys.31',
                'attitude.neutral:seed.pro:recsys.31',
                'attitude.pro:recsys.22',
                'attitude.pro:recsys.31'
                )
278
+
279
# Preregistered treatment contrasts (treat cell minus ctrl cell).
# NOTE: this object shadows stats::contrasts() within the script environment.
contrasts <- rbind(
  i = c(treat = 'attitude.pro:recsys.31',
        ctrl = 'attitude.pro:recsys.22'
        ),
  ii = c(treat = 'attitude.anti:recsys.31',
         ctrl = 'attitude.anti:recsys.22'
         ),
  iii = c(treat = 'attitude.neutral:seed.pro:recsys.31',
          ctrl = 'attitude.neutral:seed.pro:recsys.22'
          ),
  iv = c(treat = 'attitude.neutral:seed.anti:recsys.31',
         ctrl = 'attitude.neutral:seed.anti:recsys.22'
         ),
  v = c(treat = 'attitude.neutral:seed.pro:recsys.31',
        ctrl = 'attitude.neutral:seed.anti:recsys.31'
        ),
  vi = c(treat = 'attitude.neutral:seed.pro:recsys.22',
         ctrl = 'attitude.neutral:seed.anti:recsys.22'
         )
)

## check that contrasts are valid (every named cell is an estimated coef)
assert_that(all(unlist(contrasts) %in% treatments))

## check that specifications are equivalent: the manually written dummy
## interaction model must reproduce the factor-interaction cell means
coefs.v1 <- coef(lm(gun_index_w2 ~ 0 + attitude:treatment_arm, d))
coefs.v2 <- coef(
  lm(gun_index_w2 ~
       0 +
       attitude.anti:recsys.22 +
       attitude.anti:recsys.31 +
       attitude.neutral:seed.anti:recsys.22 +
       attitude.neutral:seed.pro:recsys.22 +
       attitude.neutral:seed.anti:recsys.31 +
       attitude.neutral:seed.pro:recsys.31 +
       attitude.pro:recsys.22 +
       attitude.pro:recsys.31,
     d
     )
)
assert_that(all.equal(unname(sort(coefs.v1)), unname(sort(coefs.v2))))
320
+
321
+
322
+
323
##########################
## hierarchical testing ##
##########################

## initialize top layer p-values:
## does treatment have any effect on any outcome in family
families <- c('platform',
              'gunpolicy',
              'media',
              'affpol'
              )
layer1.pvals <- rep(NA_real_, length(families))
layer1.notes <- rep('', length(families))
names(layer1.pvals) <- families

## initialize 2nd layer p-values:
## which treatment has detectable effect?
# one slot per preregistered contrast, reused for each family
contrast.pvals <- rep(NA_real_, nrow(contrasts))
names(contrast.pvals) <- paste(contrasts[, 'treat'],
                               contrasts[, 'ctrl'],
                               sep = '.vs.'
                               )
layer2.pvals <- list(platform = contrast.pvals,
                     gunpolicy = contrast.pvals,
                     media = contrast.pvals,
                     affpol = contrast.pvals
                     )
rm(contrast.pvals)

## initialize 3rd layer p-values:
## on which specific outcome in family?
# nested structure: [[family]][[contrast]][outcome] for p-values, point
# estimates, and standard errors; notes are per family x outcome
layer3.pvals <- list()
layer3.ests <- list()
layer3.ses <- list()
layer3.notes <- list()
for (i in 1:length(families)){
  family <- families[i]
  layer3.pvals[[family]] <- list()
  layer3.ests[[family]] <- list()
  layer3.ses[[family]] <- list()
  layer3.notes[[family]] <- list()
  # NOTE: this shadows the global `outcomes` union defined above
  outcomes <- get(family %.% '.outcomes')
  for (j in 1:nrow(contrasts)){
    contrast <- paste(contrasts[j, 'treat'],
                      contrasts[j, 'ctrl'],
                      sep = '.vs.'
                      )
    layer3.pvals[[family]][[contrast]] <- numeric(0)
    layer3.ests[[family]][[contrast]] <- numeric(0)
    layer3.ses[[family]][[contrast]] <- numeric(0)
    for (k in 1:length(outcomes)){
      outcome <- outcomes[k]
      layer3.pvals[[family]][[contrast]][outcome] <- NA_real_
      layer3.ests[[family]][[contrast]][outcome] <- NA_real_
      layer3.ses[[family]][[contrast]][outcome] <- NA_real_
      layer3.notes[[family]][outcome] <- ''
    }
  }
}
382
+
383
+
384
+
385
### begin nested analyses ###

## For each hypothesis family: fit one (multivariate) regression of the
## family's outcomes on all treatment cells plus the family's controls, then
## test (layer 1) all contrasts jointly on all outcomes, (layer 2) each
## contrast jointly on all outcomes, and (layer 3) each contrast on each
## outcome individually, storing estimates/SEs for layer 3.
for (i in 1:length(families)){

  family <- families[i]
  family.outcomes <- get(family %.% '.outcomes')
  family.controls <- get(family %.% '.controls')
  # all treatment x control interaction labels, used only for matching
  # dropped-coefficient names back to readable terms
  family.controls.interactions <- as.character(
    outer(treatments,
          family.controls,
          FUN = function(x, y) x %.% ':' %.% y
          )
  )

  family.formula <-
    'cbind(' %.% # outcomes
    paste(family.outcomes,
          collapse = ', '
          ) %.% ') ~\n0 +\n' %.%
    paste(treatments, # treatments (base terms)
          collapse = ' +\n'
          ) %.% ' +\n' %.%
    paste(family.controls, # controls (base terms)
          collapse = ' +\n'
          )

  cat(rep('=', 80),
      '\n\nHYPOTHESIS FAMILY: ',
      family,
      '\n\nrunning mlm:\n\n',
      family.formula,
      '\n\n',
      sep = ''
      )

  ## run model
  family.mod <- lm(family.formula, d)
  ## hack to eliminate NA coefs: refit without collinear terms
  if (any(is.na(coef(family.mod)))){
    if ('mlm' %in% class(family.mod)){
      drop <- rownames(coef(family.mod))[is.na(coef(family.mod))[, 1]]
    } else {
      drop <- names(coef(family.mod))[is.na(coef(family.mod))]
    }
    drop <- convert.interaction.names(drop,
                                      c(family.controls,
                                        family.controls.interactions
                                        )
                                      )
    ## BUG FIX: paste(drop, sep = ', ') does not join a vector (sep only
    ## separates multiple arguments); collapse is needed so the note is a
    ## single string listing every dropped coefficient
    layer1.notes[[i]] <-
      layer1.notes[[i]] %.%
      'dropped the following coefs: ' %.%
      paste(drop, collapse = ', ') %.%
      '\n\n'
    family.formula <- gsub(
      '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
      '',
      family.formula
    )
    family.mod <- lm(family.formula, d)
  }

  ## heteroskedasticity-robust covariance for all tests in this family
  family.vcov <- vcovHC(family.mod)
  if (is.null(dim(coef(family.mod)))){
    coef.names <- names(coef(family.mod))
  } else {
    coef.names <- rownames(coef(family.mod))
  }

  ### top layer: test overall significance of all contrasts on all outcomes ###
  ## convert interaction terms to whatever mlm() named it
  treats <- convert.interaction.names(contrasts[, 'treat'], coef.names)
  ctrls <- convert.interaction.names(contrasts[, 'ctrl'], coef.names)
  ## test jointly (Pillai for multivariate models, F otherwise); failures
  ## (e.g. singular error SSP) are caught and recorded in the notes, leaving
  ## the layer-1 p-value NA so the Simes fallback below can fill it in
  lht.attempt <- tryCatch({
    if ('mlm' %in% class(family.mod)){
      contrast.lht <- linearHypothesis(
        family.mod,
        vcov. = family.vcov,
        hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
        rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
        test = 'Pillai'
      )
      layer1.pvals[[i]] <- extract.lht(contrast.lht)[, 'Pr(>F)']
    } else {
      contrast.lht <- linearHypothesis(
        family.mod,
        vcov. = family.vcov,
        hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
        rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
        test = 'F'
      )
      layer1.pvals[[i]] <- contrast.lht[['Pr(>F)']][2]
    }
  },
  error = function(e){
    warning(sprintf('caught error in %s family:', family), e)
    ## return error as string for inclusion in notes
    'caught error: ' %.%
      e %.%
      '\n\n'
  })
  if (lht.attempt %like% 'caught error'){
    layer1.notes[[i]] <-
      layer1.notes[[i]] %.% lht.attempt
  }



  ### layer 2: test each contrast individually on all outcomes ###

  for (j in 1:nrow(contrasts)){
    ## test group equality on all outcomes
    if ('mlm' %in% class(family.mod)){
      contrast.lht <-
        linearHypothesis(
          family.mod,
          vcov. = family.vcov,
          hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
          rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
          test = 'Pillai'
        )
      layer2.pvals[[i]][j] <- extract.lht(contrast.lht)[, 'Pr(>F)']
    } else {
      contrast.lht <- linearHypothesis(
        family.mod,
        vcov. = family.vcov,
        hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
        rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
        test = 'F'
      )
      layer2.pvals[[i]][j] <- contrast.lht[['Pr(>F)']][2]
    }
  }

  ### layer 3: test each contrast on each outcome individually ###

  for (k in 1:length(family.outcomes)){

    outcome <- family.outcomes[k]

    ## univariate regression of this outcome on the same design
    outcome.formula <-
      outcome %.% ' ~\n0 +\n' %.%
      paste(treatments, # treatments (base terms)
            collapse = ' +\n'
            ) %.% ' +\n' %.%
      paste(family.controls, # controls (base terms)
            collapse = ' +\n'
            )

    cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')

    outcome.mod <- lm(outcome.formula, d)
    ## hack to eliminate NA coefs: refit without collinear terms
    if (any(is.na(coef(outcome.mod)))){
      drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
      drop <- convert.interaction.names(drop,
                                        c(family.controls,
                                          family.controls.interactions
                                          )
                                        )
      ## BUG FIX: same paste(sep=) -> paste(collapse=) fix as in layer 1
      layer3.notes[[i]][k] <-
        layer3.notes[[i]][k] %.%
        'dropped the following coefs: ' %.%
        paste(drop, collapse = ', ') %.%
        '\n\n'
      outcome.formula <- gsub(
        '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
        '',
        outcome.formula
      )
      outcome.mod <- lm(outcome.formula, d)
    }

    ## robust vcov, falling back to the classical one if it is degenerate
    outcome.vcov <- vcovHC(outcome.mod)
    if (any(!is.finite(outcome.vcov))){
      outcome.vcov <- vcov(outcome.mod)
      layer3.notes[[i]][k] <-
        layer3.notes[[i]][k] %.%
        'falling back to non-robust vcov\n\n'
    }
    coef.names <- names(coef(outcome.mod))

    for (j in 1:nrow(contrasts)){

      ## convert this interaction term to whatever lm() named it
      treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
      ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
      ## test group equality on this outcome
      contrast.lht <- linearHypothesis(
        outcome.mod,
        vcov. = outcome.vcov,
        hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
        test = 'F'
      )
      layer3.pvals[[i]][[j]][k] <- contrast.lht[['Pr(>F)']][2]
      ## point estimate and delta-method SE of the cell-mean difference
      layer3.ests[[i]][[j]][k] <- (
        coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
      ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
      layer3.ses[[i]][[j]][k] <- sqrt(
        outcome.vcov[treat, treat] +
        outcome.vcov[ctrl, ctrl] -
        2 * outcome.vcov[treat, ctrl]
      )

      ## ## confirm
      ## linearHypothesis(
      ##   outcome.mod,
      ##   vcov. = outcome.vcov,
      ##   hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
      ##   test = 'F'
      ## )
      ## (coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl])^2 /
      ##   (
      ##     outcome.vcov[treat, treat] +
      ##     outcome.vcov[ctrl, ctrl] -
      ##     2 * outcome.vcov[treat, ctrl]
      ##   )
      ## linearHypothesis(
      ##   outcome.mod,
      ##   vcov. = outcome.vcov,
      ##   hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
      ##   test = 'Chisq'
      ## )
      ## 2 - 2 * pnorm(abs(
      ##   (coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]) /
      ##   sqrt(
      ##     outcome.vcov[treat, treat] +
      ##     outcome.vcov[ctrl, ctrl] -
      ##     2 * outcome.vcov[treat, ctrl]
      ##   )
      ## ))

    }

  }

}
623
+
624
+
625
+
626
#################################
## multiple testing correction ##
#################################

# Hierarchical (gatekeeping) BH procedure: a family's contrasts are only
# tested if the family-level test passes, and outcome-level tests only if
# the contrast-level test passes; adjusted p-values at lower layers are
# inflated by the selection proportions of the layers above.

thresh <- .05

## if layer-1 f-test is infeasible for a family due to collinearity,
## obtain layer-1 p-values for that family by simes
for (i in which(is.na(layer1.pvals))){
  layer1.pvals[i] <- simes(layer2.pvals[[i]])
}

## multiple testing adjustment for layer 1
layer1.pvals.adj <- p.adjust(layer1.pvals, 'BH')
layer1.nonnull.prop <- mean(layer1.pvals.adj < thresh)

## test layer-2 hypotheses only if layer 1 passes
layer2.pvals.adj <- layer2.pvals # start by copying unadjusted layer-2 p-values
layer2.nonnull.prop <- rep(NA, length(layer1.pvals.adj))
names(layer2.nonnull.prop) <- names(layer1.pvals.adj)
for (i in 1:length(layer1.pvals)){
  if (layer1.pvals.adj[i] < thresh){ # if layer 1 passes
    ## adjust for multiplicity within layer 2...
    layer2.pvals.adj[[i]] <- p.adjust(layer2.pvals[[i]], 'BH')
    ## ... and inflate to account for selection at layer 1
    layer2.pvals.adj[[i]] <-
      pmin(layer2.pvals.adj[[i]] / layer1.nonnull.prop, 1)
    ## keep track of selection at layer 2 for use in layer 3
    layer2.nonnull.prop[i] <- mean(layer2.pvals.adj[[i]] < thresh)
  } else { # if layer 1 fails
    layer2.pvals.adj[[i]] <- rep(NA_real_, length(layer2.pvals[[i]]))
    names(layer2.pvals.adj[[i]]) <- names(layer2.pvals[[i]])
  }
}

## test layer-3 hypotheses only if layers 1 & 2 pass
# NOTE(review): if layer 1 passes but a layer-2 adjusted p-value is NA
# (e.g. its test errored), `NA < thresh` makes the if() condition NA and
# this loop would stop with an error -- confirm that cannot occur here.
layer3.pvals.adj <- layer3.pvals # start by copying unadjusted layer-3 p-values
for (i in 1:length(layer1.pvals.adj)){
  for (j in 1:length(layer2.pvals.adj[[i]])){
    ##
    if (layer1.pvals.adj[i] < thresh && # if layer 1 passes...
        layer2.pvals.adj[[i]][j] < thresh # ... and if layer 2 passes
        ){
      ## adjust for multiplicity within layer 3...
      layer3.pvals.adj[[i]][[j]] <- p.adjust(layer3.pvals[[i]][[j]], 'BH')
      ## ... and inflate to account for selection at layer 1
      layer3.pvals.adj[[i]][[j]] <- pmin(
        layer3.pvals.adj[[i]][[j]] / layer1.nonnull.prop / layer2.nonnull.prop[i],
        1
      )
    } else {
      layer3.pvals.adj[[i]][[j]] <- rep(NA_real_, length(layer3.pvals[[i]][[j]]))
      names(layer3.pvals.adj[[i]][[j]]) <- names(layer3.pvals[[i]][[j]])
    }
  }
}
682
+
683
## Assemble all adjusted p-values (and layer-3 estimates/SEs) into one long
## table: one 'overall' row per family, one 'overall' row per contrast, and
## one row per family x contrast x outcome.
## PERF/IDIOM FIX: the original grew a data.table with rbind() inside nested
## loops (quadratic copying); accumulating row tables in a list and calling
## rbindlist() once produces the identical table in linear time.
pvals.rows <- list()
for (i in 1:length(layer1.pvals.adj)){
  pvals.rows[[length(pvals.rows) + 1]] <-
    data.table(layer1 = names(layer1.pvals.adj)[i],
               layer2 = 'overall',
               layer3 = 'overall',
               p.adj = layer1.pvals.adj[i],
               est = NA_real_,
               se = NA_real_
               )
  for (j in 1:length(layer2.pvals.adj[[i]])){
    pvals.rows[[length(pvals.rows) + 1]] <-
      data.table(layer1 = names(layer1.pvals.adj)[i],
                 layer2 = names(layer2.pvals.adj[[i]])[j],
                 layer3 = 'overall',
                 p.adj = layer2.pvals.adj[[i]][j],
                 est = NA_real_,
                 se = NA_real_
                 )
    for (k in 1:length(layer3.pvals.adj[[i]][[j]])){
      pvals.rows[[length(pvals.rows) + 1]] <-
        data.table(layer1 = names(layer1.pvals.adj)[i],
                   layer2 = names(layer2.pvals.adj[[i]])[j],
                   layer3 = names(layer3.pvals.adj[[i]][[j]])[k],
                   p.adj = layer3.pvals.adj[[i]][[j]][k],
                   est = layer3.ests[[i]][[j]][k],
                   se = layer3.ses[[i]][[j]][k]
                   )
    }
  }
}
pvals.adj <- rbindlist(pvals.rows)
723
+
724
## write out
## NOTE(review): this path lacks the 'gun control (issue 1)/' subdirectory the
## pretty version below uses -- confirm this is intentional
fwrite(pvals.adj, '../results/intermediate data/guncontrol_padj_basecontrol.csv')

## prettify for reading
## NOTE(review): data.table assignment is by reference; `colnames<-` below
## forces a copy, after which the `:=` edits touch only the pretty table --
## but an explicit copy() would make that intent unmistakable
pvals.adj.pretty <- pvals.adj
colnames(pvals.adj.pretty) <- gsub('layer1',
                                   'layer1_hypothesisfamily',
                                   colnames(pvals.adj.pretty)
                                   )
colnames(pvals.adj.pretty) <- gsub('layer2',
                                   'layer2_treatmentcontrast',
                                   colnames(pvals.adj.pretty)
                                   )
colnames(pvals.adj.pretty) <- gsub('layer3',
                                   'layer3_specificoutcome',
                                   colnames(pvals.adj.pretty)
                                   )
## collapse 'attitude.X[:seed.Y]:recsys.Z' labels into 'X Y Z'
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
    'attitude\\.(pro|anti|neutral)(:seed\\.(pro|anti))?:recsys.(31|22)',
    '\\1 \\3 \\4',
    layer2_treatmentcontrast
)]
## '.vs.' separator becomes ' - '
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
    '.vs.',
    ' - ',
    layer2_treatmentcontrast,
    fixed = TRUE
)]
## squeeze repeated spaces left by the optional seed group
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
    ' +',
    ' ',
    layer2_treatmentcontrast
)]
fwrite(pvals.adj.pretty,
       '../results/intermediate data/gun control (issue 1)/guncontrol_padj_basecontrol_pretty.csv'
       )

print('preregistered results:')
pvals.adj.pretty[p.adj < .05 & layer3_specificoutcome != 'overall',]
763
+
764
+
765
+
766
##############################################
## added 4 jun 2024 at request of reviewers ##
##############################################

## analyze components of main policy outcome index that relate to
## post-experiment w2 "understanding" of an issue, using w1 version
## of that same outcome as the only control (analogous to outcome index
## regression, which uses w2 index as outcome and w1 index as control)

## initialize results table
understanding.results <- data.table(layer2_treatmentcontrast = character(0),
                                    layer3_specificoutcome = character(0),
                                    est = numeric(0),
                                    se = numeric(0),
                                    p = numeric(0)
                                    )
## per-outcome analysis notes (dropped coefs, vcov fallbacks).
## fixed: notes were previously written into layer3.notes[[i]][k], but `i` is
## a stale index left over from the earlier layer loops -- this loop only
## defines `k`, so those writes landed in (at best) an arbitrary family.
understanding.notes <- rep('', length(gunpolicy.outcomes.understanding))

## loop over outcomes
for (k in 1:length(gunpolicy.outcomes.understanding)){

    outcome <- gunpolicy.outcomes.understanding[k]

    ## w2 outcome on treatment indicators (no intercept) plus the w1 version
    ## of the same item as the sole control
    outcome.formula <-
        outcome %.% ' ~\n0 +\n' %.%
        paste(treatments, # treatments (base terms)
              collapse = ' +\n'
              ) %.% ' +\n' %.%
        paste(gsub('_w2', '', outcome), # controls (w1 outcome)
              collapse = ' +\n'
              )

    cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')

    outcome.mod <- lm(outcome.formula, d)
    ## hack to eliminate NA coefs: drop collinear terms and refit
    if (any(is.na(coef(outcome.mod)))){
        drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
        drop <- convert.interaction.names(drop,
                                          c(family.controls,
                                            family.controls.interactions
                                            )
                                          )
        ## fixed: paste(..., sep = ', ') does not join a vector into one
        ## string; collapse does
        understanding.notes[k] <-
            understanding.notes[k] %.%
            'dropped the following coefs: ' %.%
            paste(drop, collapse = ', ') %.%
            '\n\n'
        outcome.formula <- gsub(
            '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
            '',
            outcome.formula
        )
        outcome.mod <- lm(outcome.formula, d)
    }

    ## robust (HC) vcov, falling back to classical vcov if degenerate
    outcome.vcov <- vcovHC(outcome.mod)
    if (any(!is.finite(outcome.vcov))){
        outcome.vcov <- vcov(outcome.mod)
        understanding.notes[k] <-
            understanding.notes[k] %.%
            'falling back to non-robust vcov\n\n'
    }
    coef.names <- names(coef(outcome.mod))

    ## loop over treatment contrasts
    for (j in 1:nrow(contrasts)){

        ## convert this interaction term to whatever lm() named it
        treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
        ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
        ## test group equality on this outcome
        contrast.lht <- linearHypothesis(
            outcome.mod,
            vcov. = outcome.vcov,
            hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
            test = 'F'
        )

        ## prettify name of contrast for readability
        contrast <- treat %.% ' - ' %.% ctrl
        contrast <- gsub('attitude\\.(pro|anti|neutral)', '\\1', contrast)
        contrast <- gsub('seed\\.(pro|anti)', '\\1', contrast)
        contrast <- gsub('recsys.(31|22)', '\\1', contrast)
        contrast <- gsub(':', ' ', contrast)
        contrast <- gsub(' +', ' ', contrast)

        p <- contrast.lht[['Pr(>F)']][2]
        est <- (
            coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
        ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
        ## SE of the difference of two coefficients
        se <- sqrt(
            outcome.vcov[treat, treat] +
            outcome.vcov[ctrl, ctrl] -
            2 * outcome.vcov[treat, ctrl]
        )

        understanding.results <- rbind(
            understanding.results,
            data.table(
                layer2_treatmentcontrast = contrast,
                layer3_specificoutcome = outcome,
                p,
                est,
                se
            )
        )

    }

}

## conduct multiple testing adjustment within newly exploratory results
understanding.results[, p.adj := p.adjust(p, 'BH')]
print('exploratory results on understanding-related questions:')
understanding.results[p.adj < .05,]

fwrite(understanding.results,
       '../results/intermediate data/gun control (issue 1)/guncontrol_understanding_basecontrol_pretty.csv'
       )
886
+
887
+
888
+
889
#############################################################
## preregistered exploratory heterogeneous effect analysis ##
#############################################################

# outcome is gun_index_w2
# construct moderators by cutting demographics & pre-treatment vars at midpoint

d[,
  pol_interest_hi := as.numeric(
      pol_interest > median(pol_interest, na.rm = TRUE)
  )]
d[,
  age_hi := as.numeric(
      age > median(age, na.rm = TRUE)
  )]
d[,
  freq_youtube_hi := as.numeric(
      freq_youtube > median(freq_youtube, na.rm = TRUE)
  )]

moderator_variables <- c('pol_interest_hi',
                         'age_hi',
                         'male',
                         'freq_youtube_hi'
                         )
## added 4 jun 2024 at request of reviewer
moderator_variables_revision <- 'college'

## helper: fit the moderation model on one treatment-arm subset and return a
## one-row table with the robust (HC) test of the recsys.31 x moderator
## interaction. (Replaces four copy-pasted stanzas that differed only in the
## subset; the model, vcov, and extracted row are identical to the originals.)
run.moderation.test <- function(d.subset, subset.label, moderator.name){
    mod <- lm(
        gun_index_w2 ~
            recsys.31 * moderator +
            gun_index, # only control is pre-treatment outcome, as in primary analysis
        data = d.subset
    )
    smry <- coeftest(mod, vcovHC(mod))
    data.table(subset = subset.label,
               interaction = 'recsys.31:' %.% moderator.name,
               smry['recsys.31:moderator', , drop = FALSE]
               )
}

interaction_results <- data.table()
for (moderator_variable in c(moderator_variables, moderator_variables_revision)){

    ## materialize the current moderator under a fixed column name so the
    ## model formula (and coefficient name) stays constant across iterations
    d[, moderator := get(moderator_variable)]

    interaction_results <- rbind(
        interaction_results,
        run.moderation.test(d[attitude.anti == 1],
                            'attitude.anti',
                            moderator_variable),
        fill = TRUE
    )
    interaction_results <- rbind(
        interaction_results,
        run.moderation.test(d[attitude.pro == 1],
                            'attitude.pro',
                            moderator_variable),
        fill = TRUE
    )
    interaction_results <- rbind(
        interaction_results,
        run.moderation.test(d[attitude.neutral == 1 & seed.anti == 1],
                            'attitude.neutral.seed.anti',
                            moderator_variable),
        fill = TRUE
    )
    interaction_results <- rbind(
        interaction_results,
        run.moderation.test(d[attitude.neutral == 1 & seed.pro == 1],
                            'attitude.neutral.seed.pro',
                            moderator_variable),
        fill = TRUE
    )

}

# no significant heterogeneity even before multiple testing correction
print('heterogeneity results before multiple correction:')
interaction_results[`Pr(>|t|)` < .05,]
# none survives a BH correction
interaction_results[, p.adj := p.adjust(`Pr(>|t|)`, 'BH')]
print('heterogeneity p-values after multiple correction:')
interaction_results[, p.adj]

## updated 4 jun 2024 at request of reviewer
## rename columns via a lookup keyed on the coeftest output names
colnames(interaction_results) <- c(
    subset = 'subset',
    interaction = 'interaction',
    Estimate = 'est',
    `Std. Error` = 'se',
    `t value` = 't',
    `Pr(>|t|)` = 'p',
    p.adj = 'p.adj'
)[colnames(interaction_results)]
fwrite(interaction_results,
       '../results/intermediate data/gun control (issue 1)/guncontrol_heterogeneity_basecontrol.csv'
       )
1020
+
1021
+
1022
+
1023
###############################################
## added 30 sep 2024 at request of reviewers ##
###############################################

## what are minimum detectable effects, given multiple testing correction?

## simulation grid: 1000 seeds crossed with hypothesized effect sizes
n_sims <- 1000
params_sims <- expand.grid(seed = 19104 + 0:(n_sims - 1),
                           effect = seq(from = .01, to = .05, by = .001)
                           )

## step 1: identify largest p-value s.t. we would have rejected layer-1 null
## (that at least one treatment contrast has effect on policy index)
## to do this, we hold fixed p-values for all other layer-1 hypothesis families
layer1.pvals.mde <- layer1.pvals
layer1.pvals.mde['gunpolicy'] <- 0
## walk the gunpolicy p-value up in .001 steps until BH no longer rejects;
## the resulting value is the rejection boundary (to .001 resolution)
while (p.adjust(layer1.pvals.mde, 'BH')['gunpolicy'] <= .05){
    layer1.pvals.mde['gunpolicy'] <- layer1.pvals.mde['gunpolicy'] + .001
}
pval.cutoff <- layer1.pvals.mde['gunpolicy']
print('to achieve significance of policy attitude family at layer 1 (pooled test of any effect on policy index from any contrast) when correcting for multiple layer-1 hypothesis families, this is the minimum cutoff value after conducting simes correction of layer 2 pvals:')
pval.cutoff

## if layer-1 null was rejected for the policy outcome, then we would use this
## correction factor when interpreting layer-2 p-values (for specific contrasts)
layer1.nonnull.prop.if.gt.cutoff <- mean(c(
    p.adjust(layer1.pvals.mde, 'BH')[c('platform', 'media', 'affpol')] < .05,
    TRUE
))

## the sims below will only examine 3/1 vs 2/2 treatment contrasts, so we will
## hold fixed the layer-2 p-values that relate to seed contrasts
## NOTE(review): this gun-control script indexes layer2.pvals$mwpolicy -- the
## 'mw' prefix suggests the minimum-wage family; confirm this is not a
## copy-paste slip for the gun-policy family
pvals.for.seed.contrasts.on.policyindex <- layer2.pvals$mwpolicy[
    c('attitude.neutral:seed.pro:recsys.31.vs.attitude.neutral:seed.anti:recsys.31',
      'attitude.neutral:seed.pro:recsys.22.vs.attitude.neutral:seed.anti:recsys.22'
      )
]
1060
+
1061
+
1062
+
1063
## step 2: prepare simulations based on real data ------------------------------

## for each treatment-arm subset, fit the primary model, then extract the
## design matrix, residual SD, and coefficients-with-null-treatment-effect
## needed to generate simulated outcomes below

mod.attitude.anti <- lm(
    gun_index_w2 ~ recsys.31 + gun_index,
    data = d[attitude.anti == 1]
)
X.attitude.anti <- model.matrix(mod.attitude.anti)
residual.sd.attitude.anti <- sd(resid(mod.attitude.anti))
## confirm that this recovers fitted values
## model.matrix(mod.attitude.anti) %*% coef(mod.attitude.anti)
## NOTE(review): exact `==` on floats; holds here because both sides are the
## same computation, but all.equal() would be the safer assertion
assert_that(all(
    predict(mod.attitude.anti) ==
    X.attitude.anti %*% coef(mod.attitude.anti)
))
## we will create simulated outcomes, given hypothesized treatment effect
## == intercept + <-- part A
## real coef * real pretreatment attitude + <-- part A
## hypothesized treatment effect * real treatment status + <-- part B
## rnorm(mean = 0, sd = real residual outcome sd) <-- part C
## A: generate fitted values under hypothesized effect size
coef.attitude.anti.baseline <- coef(mod.attitude.anti)
coef.attitude.anti.baseline['recsys.31'] <- 0
Y.attitude.anti.baseline <-
    as.numeric(X.attitude.anti %*% coef.attitude.anti.baseline)
## B: will be added below with hypothesized effect * treatment
## C: will be drawn below with rnorm(mean=0, sd=residual_sd)
## (labels B/C above corrected: they were swapped relative to the legend)

## repeat above for respondents with pro attitude
mod.attitude.pro <- lm(
    gun_index_w2 ~ recsys.31 + gun_index,
    data = d[attitude.pro == 1]
)
X.attitude.pro <- model.matrix(mod.attitude.pro)
residual.sd.attitude.pro <- sd(resid(mod.attitude.pro))
coef.attitude.pro.baseline <- coef(mod.attitude.pro)
coef.attitude.pro.baseline['recsys.31'] <- 0
Y.attitude.pro.baseline <-
    as.numeric(X.attitude.pro %*% coef.attitude.pro.baseline)

## repeat above for respondents with neutral attitude assigned to pro seed
mod.attitude.neutral.seed.pro <- lm(
    gun_index_w2 ~ recsys.31 + gun_index,
    data = d[attitude.neutral == 1 & seed.pro == 1]
)
X.attitude.neutral.seed.pro <- model.matrix(mod.attitude.neutral.seed.pro)
residual.sd.attitude.neutral.seed.pro <- sd(resid(mod.attitude.neutral.seed.pro))
coef.attitude.neutral.seed.pro.baseline <- coef(mod.attitude.neutral.seed.pro)
coef.attitude.neutral.seed.pro.baseline['recsys.31'] <- 0
Y.attitude.neutral.seed.pro.baseline <-
    as.numeric(X.attitude.neutral.seed.pro %*% coef.attitude.neutral.seed.pro.baseline)

## repeat above for respondents with neutral attitude assigned to anti seed
mod.attitude.neutral.seed.anti <- lm(
    gun_index_w2 ~ recsys.31 + gun_index,
    data = d[attitude.neutral == 1 & seed.anti == 1]
)
X.attitude.neutral.seed.anti <- model.matrix(mod.attitude.neutral.seed.anti)
residual.sd.attitude.neutral.seed.anti <- sd(resid(mod.attitude.neutral.seed.anti))
coef.attitude.neutral.seed.anti.baseline <- coef(mod.attitude.neutral.seed.anti)
coef.attitude.neutral.seed.anti.baseline['recsys.31'] <- 0
Y.attitude.neutral.seed.anti.baseline <-
    as.numeric(X.attitude.neutral.seed.anti %*% coef.attitude.neutral.seed.anti.baseline)
1125
+
1126
+
1127
+
1128
## step 3: conduct sims --------------------------------------------------------

## for each (seed, effect) pair: build a synthetic outcome = null-effect fitted
## values + hypothesized effect * treatment + gaussian noise with the observed
## residual SD, refit the model, and keep the robust test of the treatment coef

sims.attitude.anti <- foreach(seed = params_sims$seed,
                              effect = params_sims$effect,
                              .combine = rbind
                              ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.anti.baseline +
            effect * X.attitude.anti[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.anti),
                mean = 0,
                sd = residual.sd.attitude.anti
            )
        ## refit on the original design matrix (no intercept; X carries it)
        mod <- lm(Y ~ 0 + X.attitude.anti)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.antirecsys.31', , drop = FALSE])
        )
    }

## same simulation for the pro-attitude subset
sims.attitude.pro <- foreach(seed = params_sims$seed,
                             effect = params_sims$effect,
                             .combine = rbind
                             ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.pro.baseline +
            effect * X.attitude.pro[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.pro),
                mean = 0,
                sd = residual.sd.attitude.pro
            )
        mod <- lm(Y ~ 0 + X.attitude.pro)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.prorecsys.31', , drop = FALSE])
        )
    }

## same simulation for neutral attitude / anti seed
sims.attitude.neutral.seed.anti <- foreach(seed = params_sims$seed,
                                           effect = params_sims$effect,
                                           .combine = rbind
                                           ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.neutral.seed.anti.baseline +
            effect * X.attitude.neutral.seed.anti[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.neutral.seed.anti),
                mean = 0,
                sd = residual.sd.attitude.neutral.seed.anti
            )
        mod <- lm(Y ~ 0 + X.attitude.neutral.seed.anti)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.neutral.seed.antirecsys.31', , drop = FALSE])
        )
    }

## same simulation for neutral attitude / pro seed
sims.attitude.neutral.seed.pro <- foreach(seed = params_sims$seed,
                                          effect = params_sims$effect,
                                          .combine = rbind
                                          ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.neutral.seed.pro.baseline +
            effect * X.attitude.neutral.seed.pro[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.neutral.seed.pro),
                mean = 0,
                sd = residual.sd.attitude.neutral.seed.pro
            )
        mod <- lm(Y ~ 0 + X.attitude.neutral.seed.pro)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.neutral.seed.prorecsys.31', , drop = FALSE])
        )
    }
1221
+
1222
+
1223
+
1224
## step 4: analyze power results -----------------------------------------------

## without multiple-testing corrections: for each hypothesized effect size,
## power = share of sims with p < .05; the MDE is the smallest effect with
## power >= .8

print('mde for respondents with anti attitude (conventional analysis w/o correction):')
sims.attitude.anti[,
                   .(p.reject = mean(`Pr(>|t|)` < .05)),
                   by = effect
                   ][p.reject >= .8, min(effect)]

print('mde for respondents with pro attitude (conventional analysis w/o correction):')
sims.attitude.pro[,
                  .(p.reject = mean(`Pr(>|t|)` < .05)),
                  by = effect
                  ][p.reject >= .8, min(effect)]

## fixed: this label previously said "pro seed" while summarizing the
## anti-seed simulations
print('mde for respondents with neutral attitude assigned to anti seed (conventional analysis w/o correction):')
sims.attitude.neutral.seed.anti[,
                                .(p.reject = mean(`Pr(>|t|)` < .05)),
                                by = effect
                                ][p.reject >= .8, min(effect)]

## fixed: this label previously said "anti seed" while summarizing the
## pro-seed simulations
print('mde for respondents with neutral attitude assigned to pro seed (conventional analysis w/o correction):')
sims.attitude.neutral.seed.pro[,
                               .(p.reject = mean(`Pr(>|t|)` < .05)),
                               by = effect
                               ][p.reject >= .8, min(effect)]
1251
+
1252
+
1253
+
1254
## with multiple testing correction

## pool the four subset sims so each (seed, effect) cell carries the four
## 3/1-vs-2/2 p-values from that simulated experiment
sims <- rbind(
    sims.attitude.anti,
    sims.attitude.pro,
    sims.attitude.neutral.seed.anti,
    sims.attitude.neutral.seed.pro
)

sims.layer1 <- sims[
    ,
    .(pval.pooled = ifelse(
          ## if these results would lead us to reject layer-1 pooled null of no effect
          ## on policy attitudes from any treatment contrast
          simes(c(
              `Pr(>|t|)`,
              pvals.for.seed.contrasts.on.policyindex
          )) <= pval.cutoff,
          ## disaggregate layer-2 results report with procedure from above
          ## (BH correction, then inflate by 1/prop of layer-1 sig results)
          ## then subset to only those p-values relating to 3/1 vs 2/2 contrast
          ## to see if any are <.05 after full correction procedure
          ## (the [1:4] positions are the four simulated contrasts; the two
          ## fixed seed-contrast p-values were appended after them)
          yes = min(
              p.adjust(c(`Pr(>|t|)`, pvals.for.seed.contrasts.on.policyindex),
                       'BH'
                       )[1:4] / layer1.nonnull.prop.if.gt.cutoff
          ),
          no = Inf
      )
      ),
    by = .(seed, effect)
]
print('with multiple testing correction:')
## NOTE(review): final rejection compares the adjusted layer-2 p-value to
## pval.cutoff (the layer-1 boundary) rather than .05 -- confirm intended
sims.layer1[, .(p.reject = mean(pval.pooled <= pval.cutoff)), by = effect]
print('mde:')
sims.layer1[,
            .(p.reject = mean(pval.pooled <= pval.cutoff)),
            by = effect
            ][p.reject >= .8, min(effect)]
1293
+
code/minimum wage (issue 2)/01_trt_assign.R ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## banner so this script's output is identifiable in combined logs
cat(rep('=', 80),
    '\n\n',
    'OUTPUT FROM: minimum wage (issue 2)/01_trt_assign.R',
    '\n\n',
    sep = ''
    )

library(tidyverse)
library(janitor)    # clean_names()
library(lubridate)  # as_datetime()
library(randomizr)  # block_ra(), complete_ra()
library(haven)      # read_sav() for the YouGov SPSS export

# create directory to hold cached intermediate files
dir.create("../results/intermediate data/minimum wage (issue 2)",
           recursive = TRUE, showWarnings = FALSE)
17
+
18
+
19
## wave 1 MTurk/Qualtrics export; rows 1-2 are Qualtrics header rows.
## keep only completed surveys from consenting respondents (q62; the consent
## string ends in a U+2028 line separator as exported by Qualtrics)
w1 <- read_csv("../data/minimum wage (issue 2)/YouTube+Min+Wage+-+Apr+2022+presurvey_May+24,+2022_02.57.csv")[-c(1,2),] %>% clean_names() %>%
    filter(finished == "True", q62 == "I agree to participate\u2028")

# Recodes ======================================================================

## survey duration = end - start
## NOTE(review): as.numeric() on a POSIXct difference uses whatever units
## difftime auto-selects -- confirm units before interpreting the >= 2 filter
w1 <- w1 %>% mutate(start_date = as_datetime(start_date),
                    end_date = as_datetime(end_date),
                    survey_time = as.numeric(end_date-start_date))

print('wave 1 survey time:')
summary(w1$survey_time)

## demographic 0/1 indicators from the raw question columns
w1 <- w1 %>%
    mutate(man = ifelse(q26 == "Man", 1, 0),
           black = ifelse(str_detect(q29, "Black"), 1, 0),
           white = ifelse(str_detect(q29, "White"), 1, 0),
           college = ifelse(str_detect(q30, "college ") | str_detect(q30, "Post"), 1, 0),
           # NOTE(review): bracket selection depends on the sorted order of the
           # observed q31 response strings -- fragile; verify against codebook
           income_gt50k = ifelse(q31 %in% names(table(w1$q31))[c(2,3,5,10:13)], 1, 0)
           )

# PID: -1 = Democrat / leans Democratic, 0 = neither, 1 = Republican / leans
# Republican (leaners from pid4 overwrite missing pid1 partisans)
w1$pid <- ifelse(w1$pid1=="Democrat",-1,NA)
w1$pid <- ifelse(w1$pid1=="Republican",1,w1$pid)
w1$pid <- ifelse(w1$pid4=="Closer to the Republican Party",1,w1$pid)
w1$pid <- ifelse(w1$pid4=="Closer to the Democratic Party",-1,w1$pid)
w1$pid <- ifelse(w1$pid4=="Neither",0,w1$pid)

print('wave 1 party id:')
round(table(w1$pid) / sum(table(w1$pid)), digits=2)

# Ideology: -1 = liberal / leans liberal, 0 = neither, 1 = conservative /
# leans conservative (same leaner logic as pid)
w1$ideo <- ifelse(w1$ideo1=="Liberal",-1,NA)
w1$ideo <- ifelse(w1$ideo1=="Conservative",1,w1$ideo)
w1$ideo <- ifelse(w1$ideo4=="Closer to liberals",-1,w1$ideo)
w1$ideo <- ifelse(w1$ideo4=="Closer to conservatives",1,w1$ideo)
w1$ideo <- ifelse(w1$ideo4=="Neither",0,w1$ideo)

print('wave 1 ideology:')
round(table(w1$ideo) / sum(table(w1$ideo)), digits=2)

## age from birth year; survey year 2022 hard-coded (data collected Apr 2022)
w1$age <- 2022 - as.numeric(w1$q27)
59
+
60
+
61
+
62
# A/V check ====================================================================

## pass rates for the audio and video attention checks
print("audio check:")
length(which(w1$q87 == "Quick and easy")) / length(w1$q87)

print("video check:")
length(which(w1$q89 == "wikiHow")) / length(w1$q89)

w1$audio_ok <- 1*(w1$q87 == "Quick and easy")
w1$video_ok <- 1*(w1$q89 == "wikiHow")

# Convert pre-treatment DV to numeric unit scale -------------------------------

## each minimum-wage item is recoded so that higher = more conservative
## (anti-minimum-wage) and rescaled to [0, 1]
w1 <- w1 %>%
    mutate( # higher = more conservative
        minwage15 = recode(minwage15,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
        rtwa_v1 = recode(rtwa_v1, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
        #minwage_inflation = recode(minwage_inflation,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
        rtwa_v2 = recode(rtwa_v2, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
        mw_support = recode(mw_support, "Strongly oppose raising the minimum wage"=4,"Somewhat oppose raising the minimum wage"=3,"Neither support nor oppose raising the minimum wage"=2,"Somewhat support raising the minimum wage"=1,"Strongly support raising the minimum wage"=0)/4,
        minwage_howhigh = recode(minwage_howhigh, "Much lower than the current level"=4,"Somewhat lower than the current level"=3,"About the current level"=2,"Somewhat higher than the current level"=1,"Much higher than the current level"=0)/4,
        mw_help = recode(mw_help, "Would hurt low-income workers\n10\n"=9,"9"=8,"8"=7,"7"=6,"6"=5,"5"=4,"4"=3,"3"=2,"2"=1,"Would help low-income workers\n1"=0)/9,
        mw_restrict = recode(mw_restrict, "Would restrict businesses' freedom\n1\n"=9,"2"=8,"3"=7,"4"=6,"5"=5,"6"=4,"7"=3,"8"=2,"9"=1,"Would protect workers from exploitation\n10\n"=0)/9,
        minwage_text = (25-as.numeric(minwage_text))/25
    )

## pre-treatment index: per-respondent mean of the available items
w1 <- w1 %>%
    rowwise() %>%
    mutate(mw_index = mean(c(minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text),
                           na.rm=T))
92
+
93
+
94
+
95
# trim sample -------------------------------------------------------------

# Exclusions: respondents must pass both the audio and video checks, have
# survey_time >= 2 (intended as 120 seconds, i.e. 2 minutes -- NOTE(review):
# units depend on the difftime computed above, confirm), have a minimum-wage
# index inside [0.025, 0.975] on the 0-1 scale (trimming extreme values to
# guard against eventual ceiling/floor effects), have a non-missing MTurk
# worker id, and appear only once.
# (The comment originally here referenced the gun-policy index and a 0-5
# scale; it was copied from the issue-1 script and did not match this code.)

w1 <- w1 %>% filter(audio_ok == 1, video_ok == 1)
w1 <- w1 %>% filter(survey_time >= 2)
w1 <- w1 %>% filter(mw_index >= 0.025, mw_index <= 0.975)
w1 <- w1 %>% filter(!is.na(worker_id))
w1 <- w1 %>% distinct(worker_id, .keep_all = TRUE)

print('mw index:')
summary(w1$mw_index)
110
+
111
+
112
+
113
# Block random assignment ======================================================

# Respondents are assigned to a seed video type (pro- vs. anti-minimum-wage)
# and a recommendation system (3/1 vs. 2/2), blocking on terciles of the Wave 1
# minimum-wage opinion index (higher index = more anti-minimum-wage).
# (The comment originally here referenced gun policy; it was copied from the
# issue-1 script.)

w1$tercile <- cut(w1$mw_index, breaks = quantile(w1$mw_index, c(0, 1/3, 2/3, 1)), include.lowest = TRUE, labels = 1:3)
tapply(w1$mw_index, w1$tercile, mean)
table(w1$tercile)

# MTurk arms: within each tercile, recommendation system is pure control
# (prob 1/5), 2/2 (prob 2/5), or 3/1 (prob 2/5); seed direction is fixed in
# the outer terciles (tercile 1 -> pro-minwage seed, tercile 3 -> anti-minwage
# seed) and randomized 50/50 in the middle tercile.

# For MTurk --------------------------------------------------------------------

set.seed(2022)

w1$trt_system <- block_ra(blocks = w1$tercile, prob_each = c(2/5, 2/5, 1/5), conditions = c("2/2", "3/1", "pure control"))

w1$seed <- rep("", nrow(w1))
w1[w1$tercile == 1,]$seed <- "pro-minwage seed"
w1[w1$tercile == 3,]$seed <- "anti-minwage seed"
w1[w1$tercile == 2,]$seed <- complete_ra(N = length(which(w1$tercile == 2)), prob = 0.5, conditions = c("pro-minwage seed",
                                                                                                       "anti-minwage seed"))
## sanity check: joint seed-by-system proportions within each tercile
with(w1[w1$tercile == 1,], round(prop.table(table(seed, trt_system)), digits = 3))
with(w1[w1$tercile == 2,], round(prop.table(table(seed, trt_system)), digits = 3))
with(w1[w1$tercile == 3,], round(prop.table(table(seed, trt_system)), digits = 3))

## collapse seed x system into a single 1-5 condition code (5 = pure control)
w1 <- w1 %>% mutate(trt_assign = case_when(seed == "anti-minwage seed" & trt_system == "2/2" ~ 1,
                                           seed == "anti-minwage seed" & trt_system == "3/1" ~ 2,
                                           seed == "pro-minwage seed" & trt_system == "2/2" ~ 3,
                                           seed == "pro-minwage seed" & trt_system == "3/1" ~ 4,
                                           trt_system == "pure control" ~ 5))

print('treatment assignment:')
table(w1$trt_assign)
print('seed assignment:')
table(w1$seed)
print('system assignment:')
table(w1$trt_system)
print('seed & system assignment:')
table(w1$trt_system, w1$seed)

# w1$batch <- sample(c(rep(1:floor(nrow(w1)/500), 500), rep(6, nrow(w1)-500*5)))
# sent to Qualtrics
# write_csv(data.frame(trt = w1$trt_assign, id = w1$worker_id), "mw_mturk_wave1_assignments.csv")
161
+
162
+
163
+
164
# YouGov -----------------------------------------------------------------------

## NOTE: this overwrites w1 with the YouGov sample and repeats the pipeline
## (numeric SPSS codes instead of label strings; consent == 22 per codebook)
w1 <- read_sav("../data/minimum wage (issue 2)/PRIN0016_W1_OUTPUT.sav") %>% filter(consent == 22)
w1$caseid <- as.character(w1$caseid)

# Convert pre-treatment DV to numeric unit scale
w1 <- w1 %>%
    mutate( # higher = more conservative
        minwage15 = (minwage15-1)/4,
        rtwa_v1 = (RTWA_v1-1)/4,
        rtwa_v2 = (RTWA_v2-1)/4,
        mw_support = (mw_support-1)/4,
        minwage_howhigh = (minwage_howhigh-1)/4,
        mw_help = (mw_help_a-1)/9,
        mw_restrict = (10-mw_restrict_1)/9,
        minwage_text = (25-minwage_text)/25
    )

## pre-treatment index: per-respondent mean of the available items
w1 <- w1 %>%
    rowwise() %>%
    mutate(mw_index = mean(c(minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text),
                           na.rm=T))

## survey duration; NOTE(review): as.numeric() on a difftime -- units depend
## on what difftime auto-selects; confirm before interpreting >= 2 below
w1 <- w1 %>% mutate(start_date = as_datetime(starttime),
                    end_date = as_datetime(endtime),
                    survey_time = as.numeric(end_date-start_date))

print('wave 1 survey time:')
summary(w1$survey_time)

## same trimming as MTurk (no A/V checks or worker-id dedup in this sample)
w1 <- w1 %>% filter(survey_time >= 2)
w1 <- w1 %>% filter(mw_index >= 0.025, mw_index <= 0.975)

print('mw index:')
summary(w1$mw_index)

w1$tercile <- cut(w1$mw_index, breaks = quantile(w1$mw_index, c(0, 1/3, 2/3, 1)), include.lowest = TRUE, labels = 1:3)

## cache terciles for the downstream merge scripts
write_csv(select(w1, caseid, tercile, mw_index), "../results/intermediate data/minimum wage (issue 2)/yougov_terciles.csv")


# YouGov arms: 2/2 vs 3/1 only, 50/50 within tercile blocks -- there is NO
# pure-control condition in this sample. (The comment originally here
# described the five-arm MTurk design; it did not match this code.)

set.seed(22022)

# For YouGov
w1$trt_system <- block_ra(blocks = w1$tercile, prob_each = c(.5, .5), conditions = c("2/2", "3/1"))

## seed direction: fixed in outer terciles, randomized 50/50 in the middle
w1$seed <- rep("", nrow(w1))
w1[w1$tercile == 1,]$seed <- "pro-minwage seed"
w1[w1$tercile == 3,]$seed <- "anti-minwage seed"
w1[w1$tercile == 2,]$seed <- complete_ra(N = length(which(w1$tercile == 2)), prob = 0.5, conditions = c("pro-minwage seed",
                                                                                                       "anti-minwage seed"))
## sanity check: joint seed-by-system proportions within each tercile
with(w1[w1$tercile == 1,], round(prop.table(table(seed, trt_system)), digits = 3))
with(w1[w1$tercile == 2,], round(prop.table(table(seed, trt_system)), digits = 3))
with(w1[w1$tercile == 3,], round(prop.table(table(seed, trt_system)), digits = 3))

## collapse seed x system into a single 1-4 condition code (no pure control)
w1 <- w1 %>% mutate(trt_assign = case_when(seed == "anti-minwage seed" & trt_system == "2/2" ~ 1,
                                           seed == "anti-minwage seed" & trt_system == "3/1" ~ 2,
                                           seed == "pro-minwage seed" & trt_system == "2/2" ~ 3,
                                           seed == "pro-minwage seed" & trt_system == "3/1" ~ 4))

print('treatment assignment:')
table(w1$trt_assign)
print('seed assignment:')
table(w1$seed)
print('system assignment:')
table(w1$trt_system)
print('seed & system assignment:')
table(w1$trt_system, w1$seed)

# sent to YouGov
# write_csv(select(w1, caseid, trt_system, seed, trt_assign), "mw_yg_wave1_assignments.csv")
code/minimum wage (issue 2)/02_clean_merge.R ADDED
@@ -0,0 +1,488 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## YouTube Algorithms and Minimum Wage Opinions
## Data collected April 2022 via MTurk/CloudResearch

# Banner so this script's output is identifiable in a combined log.
cat(rep('=', 80),
    '\n\n',
    'OUTPUT FROM: minimum wage (issue 2)/02_clean_merge.R',
    '\n\n',
    sep = ''
)

## Preamble ----------------------------
library(tidyverse)
library(janitor)
library(lubridate)
library(stargazer)
library(broom)

# Qualtrics CSV export: the first two rows are question-text/metadata rows,
# so drop them before cleaning column names.
a <- read_csv("../data/minimum wage (issue 2)/YouTube+Min+Wage+-+Apr+2022+presurvey_July+5,+2022_12.50.csv")[-c(1,2),] %>%
  clean_names()


# Wave 1 =======================================================================

## Recodes: elapsed survey time from the start/end timestamps.
a <- a %>% mutate(start_date = as_datetime(start_date),
                  end_date = as_datetime(end_date),
                  survey_time = as.numeric(end_date-start_date))

print('wave 1 survey time')
summary(a$survey_time)

# Demographics -----------------------------------------------------------------

# 0/1 indicators from the Qualtrics demographic items.
# NOTE(review): income_gt50k picks income brackets by position in the
# alphabetical ordering of q31's observed levels — verify against the bracket
# labels if the instrument changes.
a <- a %>%
  mutate(female = ifelse(q26 == "Woman", 1, 0),
         male = ifelse(q26 == "Man", 1, 0),
         black = ifelse(str_detect(q29, "Black"), 1, 0),
         white = ifelse(str_detect(q29, "White"), 1, 0),
         college = ifelse(str_detect(q30, "college ") | str_detect(q30, "Post"), 1, 0),
         income_gt50k = ifelse(q31 %in% names(table(a$q31))[c(2,3,5,10:13)], 1, 0)
  )
a$income_gt50k[is.na(a$q31)] <- NA

# PID: -1 = Democrat (incl. leaners), 0 = pure independent, 1 = Republican.

a <- a %>%
  mutate(pid = case_when(pid1=="Democrat" ~ -1,
                         pid1=="Republican" ~ 1,
                         pid4=="Closer to the Republican Party" ~ 1,
                         pid4=="Closer to the Democratic Party" ~ -1,
                         pid4=="Neither" ~ 0))

tabyl(a,pid)


# Ideology: -1 = liberal (incl. leaners), 0 = neither, 1 = conservative.
a <- a %>%
  mutate(ideo = case_when(ideo1=="Liberal" ~ -1,
                          ideo1=="Conservative" ~ 1,
                          ideo4=="Closer to conservatives" ~ 1,
                          ideo4=="Closer to liberals" ~ -1,
                          ideo4=="Neither" ~ 0))

tabyl(a,ideo)

# Age from self-reported birth year (q27); survey fielded in 2022.
a$age <- 2022-as.numeric(a$q27)

# age categories: 18-29; 30-44; 45-64; 65+
a <- a %>%
  mutate(age_cat = case_when(age>=18 & age<=29 ~ "18-29",
                             age>=30 & age<=44 ~ "30-44",
                             age>=45 & age<=64 ~ "45-64",
                             age>=65 ~ "65+"
  ))
a <- a %>%
  fastDummies::dummy_cols(select_columns = "age_cat",remove_selected_columns = F)

## Covariates listed in the pre-analysis plan (reference notes):
# political interest (5-point scale: 1=Not at all interested, 5=Extremely interested),
# self-reported YouTube usage frequency (7-point scale: 0=None, 6=More than 3 hours per day),
# number of self-reported favorite YouTube channels (count coded from open-ended question: "Who/what are your favorite YouTube broadcasters or channels?"; 0 if blank),
# indicator for having watched videos from popular channels (1 if any selected: "In the past week, have you watched videos from any of the following YouTube broadcasters or channels?"),
# video vs. text preference (1=Always prefer videos, 10=Always prefer text),
# gun enthusiasm (additive index of "Do you or does anyone in your household own a gun?" with yes=1 and "How often, if ever, do you visit websites about guns, hunting or other shooting sports?" from 0=Never or Hardly ever to 1=Sometimes or Often),
# gun policy issue importance (4-point scale: 1=Not at all important, 4=Very important)
# NOTE(review): the gun items above look copied from the companion gun study's
# plan; only pol_interest and freq_youtube are constructed here.

a <- a %>%
  mutate(pol_interest = dplyr::recode(q91,"Extremely interested"=5,"Very interested"=4,"Somewhat interested"=3,"Not very interested"=2,"Not at all interested"=1),
         freq_youtube = dplyr::recode(q77,"More than 3 hours per day"=6,"2–3 hours per day"=5,"1–2 hours per day"=4,"31–59 minutes per day"=3,"10–30 minutes per day"=2,"Less than 10 minutes per day"=1,"None"=0),
  )

# Descriptive-statistics table (LaTeX) for the appendix.
descr_data <- as.data.frame(select(a,
                                   female,
                                   white,
                                   black,
                                   age,
                                   college,
                                   income_gt50k))
# Drop rows that are entirely missing before summarizing.
descr_data <- descr_data %>% filter(rowSums(is.na(.)) != ncol(.))
descriptive_tab <- stargazer(descr_data,
                             summary = T, digits=2,
                             summary.stat=c("mean","sd","median","min","max","n"),
                             covariate.labels = c("Female",
                                                  "White",
                                                  "Black",
                                                  "Age",
                                                  "College educated",
                                                  "Income \\textgreater 50k"),
                             float = F,
                             out = "../results/minwage_descriptive_tab.tex")

# Sample-composition shares for the demographics dot plot.
summary_tab <- a %>%
  dplyr::summarize(female = mean(female,na.rm=T),
                   white = mean(white,na.rm=T),
                   black = mean(black,na.rm=T),
                   age1829 = mean(`age_cat_18-29`,na.rm=T),
                   age3044 = mean(`age_cat_30-44`,na.rm=T),
                   age4564 = mean(`age_cat_45-64`,na.rm=T),
                   age65p = mean(`age_cat_65+`,na.rm=T),
                   college = mean(college,na.rm=T),
                   income_gt50k = mean(income_gt50k,na.rm=T),
                   democrat = mean(pid==-1,na.rm=T),
                   republican = mean(pid==1,na.rm=T))

summary_tab <- pivot_longer(summary_tab,
                            cols=c(female,
                                   white,
                                   black,
                                   age1829,
                                   age3044,
                                   age4564,
                                   age65p,
                                   college,
                                   income_gt50k,
                                   democrat,
                                   republican),
                            names_to = "outcome",values_to = "survey_avg")
# Human-readable labels for the plot's y axis.
outcome_labels <- data.frame(outcome_pretty = c("Female",
                                                "White",
                                                "Black",
                                                "Age 18-29",
                                                "Age 30-44",
                                                "Age 45-64",
                                                "Age 65+",
                                                "College educated",
                                                "Income >$50k",
                                                "Democrat",
                                                "Republican"),
                             outcome = c("female",
                                         "white",
                                         "black",
                                         "age1829",
                                         "age3044",
                                         "age4564",
                                         "age65p",
                                         "college",
                                         "income_gt50k",
                                         "democrat",
                                         "republican"))
summary_tab$outcome_pretty <- outcome_labels$outcome_pretty[match(summary_tab$outcome,outcome_labels$outcome)]
# Fix the display order of the rows (top of plot = White).
summary_tab <- summary_tab %>%
  mutate(outcome_pretty = factor(outcome_pretty,levels = c("Republican",
                                                           "Democrat",
                                                           "Income >$50k",
                                                           "College educated",
                                                           "Age 65+",
                                                           "Age 45-64",
                                                           "Age 30-44",
                                                           "Age 18-29",
                                                           "Female",
                                                           "Black",
                                                           "White"
  ),ordered=T))

# Dot plot of sample shares; outer parentheses also print it when interactive.
(descrip_fig <- ggplot(summary_tab) +
    geom_point(aes(y=outcome_pretty,x=survey_avg)) +
    geom_text(aes(y=outcome_pretty,x=survey_avg,label=paste0(round(100*survey_avg,0),"%")),nudge_x = 0.1) +
    scale_y_discrete("") +
    scale_x_continuous("",labels=scales::percent_format(),limits=c(0,1)) +
    theme_bw()
)
ggsave(descrip_fig,filename = "../results/minwage_demographics.pdf",height=5,width=4)



# A/V check: share answering the audio/video screeners correctly.
print('audio ok:')
length(which(a$q87 == "Quick and easy"))/length(a$q87)
print('video ok:')
length(which(a$q89 == "wikiHow"))/length(a$q89)#dk

#### Outcomes ####

##### policy opinions #####
# convert to numeric unit scale:
a <- a %>%
  mutate( # higher = more conservative or anti-min wage
    minwage15 = dplyr::recode(minwage15,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    rtwa_v1 = dplyr::recode(rtwa_v1, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    # minwage_inflation = recode(minwage_inflation,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    rtwa_v2 = dplyr::recode(rtwa_v2, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    mw_support = dplyr::recode(mw_support, "Strongly oppose raising the minimum wage"=4,"Somewhat oppose raising the minimum wage"=3,"Neither support nor oppose raising the minimum wage"=2,"Somewhat support raising the minimum wage"=1,"Strongly support raising the minimum wage"=0)/4,
    minwage_howhigh = dplyr::recode(minwage_howhigh, "Much lower than the current level"=4,"Somewhat lower than the current level"=3,"About the current level"=2,"Somewhat higher than the current level"=1,"Much higher than the current level"=0)/4,
    mw_help = dplyr::recode(mw_help, "Would hurt low-income workers\n10\n"=9,"9"=8,"8"=7,"7"=6,"6"=5,"5"=4,"4"=3,"3"=2,"2"=1,"Would help low-income workers\n1"=0)/9,
    mw_restrict = dplyr::recode(mw_restrict, "Would restrict businesses' freedom\n1\n"=9,"2"=8,"3"=7,"4"=6,"5"=5,"6"=4,"7"=3,"8"=2,"9"=1,"Would protect workers from exploitation\n10\n"=0)/9,
    minwage_text_r = (25-as.numeric(minwage_text))/25,
  )
# Open-ended dollar amounts above $25 are treated as invalid.
a$minwage_text_r[as.numeric(a$minwage_text)>25] <- NA

# Additive opinion index: row mean of the eight unit-scaled items.
a <- a %>%
  rowwise() %>%
  mutate(mw_index = mean(c(minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), na.rm=T)) %>%
  ungroup()

# Cronbach's alpha
index_fa <- psych::alpha(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), check.keys = TRUE)
write.csv(data.frame(cor(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), use = "complete.obs")),row.names = T,
          file = "../results/minwage_cormat_mwindex_w1.csv")

pdf("../results/corrplot_mwindex_w1.pdf")
w1_corrplot <- corrplot::corrplot(cor(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), use = "complete.obs"),method = "shade")
dev.off()

# Write alpha as a LaTeX fragment ("%" suffix suppresses the trailing newline).
alpha <- index_fa$total["raw_alpha"]
writeLines(as.character(round(alpha,2)),con = "../results/minwage_outcomes_alpha_w1_mturk.tex",sep = "%")

# FACTOR ANALYSIS WITH VARIMAX ROTATION (PRE): share of variance on the first
# principal component of the eight items.
pca2 <- psych::principal(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r),
                         rotate="varimax",
                         nfactors=1
)
pc2 <- pca2$Vaccounted[2]
writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study2_pre.tex",sep = "%")
233
+
234
##### media trust #####
# Unit-scale each trust item; higher = more trusting of the named source.
a <- a %>%
  mutate(
    trust_majornews = dplyr::recode(q58_1,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_localnews = dplyr::recode(q58_2,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_social = dplyr::recode(q58_3,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_youtube = dplyr::recode(q58_4,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    fabricate_majornews = dplyr::recode(q89_1,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4,
    fabricate_youtube = dplyr::recode(q90,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4
  ) %>%
  rowwise() %>%
  # BUG FIX: the three items must be wrapped in c().  The original call
  #   mean(trust_majornews, trust_localnews, fabricate_majornews, na.rm=T)
  # passed trust_localnews as mean()'s `trim` argument and silently dropped
  # fabricate_majornews into `...`, so media_trust equalled trust_majornews
  # alone.  mean(c(...)) matches how mw_index is constructed above.
  mutate(media_trust = mean(c(trust_majornews, trust_localnews, fabricate_majornews), na.rm=T)) %>%
  ungroup()

# Reliability of the three-item media-trust index.
media_trust_fa <- psych::alpha(select(a, trust_majornews,trust_localnews,fabricate_majornews), check.keys = TRUE)
print('media trust alpha:')
media_trust_fa$total["raw_alpha"]
251
+
252
+
253
##### affective polarization #####
# check FTs: feeling thermometers should track respondent party ID.
a %>%
  group_by(pid) %>%
  summarize(mean_2=mean(as.numeric(q5_2),na.rm=T), # Trump
            mean_5=mean(as.numeric(q5_5),na.rm=T), # Biden
            mean_11=mean(as.numeric(q5_11),na.rm=T), # dems
            mean_12=mean(as.numeric(q5_12),na.rm=T)) # reps

# Unit-scale the party-perception items; affpol_* variables are in-party minus
# out-party differences (defined only for partisans, NA for pid == 0).
a <- a %>%
  mutate(
    smart_dems = dplyr::recode(q61, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
    smart_reps = dplyr::recode(q62_1, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
    comfort_dems = dplyr::recode(q87_1,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
    comfort_reps = dplyr::recode(q88,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
    ft_dems = as.numeric(q5_11),
    ft_reps = as.numeric(q5_12),
    affpol_smart = case_when(
      pid==-1 ~ smart_dems-smart_reps,
      pid==1 ~ smart_reps-smart_dems
    ),
    affpol_comfort = case_when(
      pid==-1 ~ comfort_dems-comfort_reps,
      pid==1 ~ comfort_reps-comfort_dems
    ),
    affpol_ft = case_when(
      pid==-1 ~ ft_dems-ft_reps,
      pid==1 ~ ft_reps-ft_dems
    )
  )



## for reinvitations: keep only respondents who passed both A/V checks and
## whose opinion index is not at the extreme tails.
w1_reinvited <- a %>% filter(q87 == "Quick and easy", q89 == "wikiHow") # AV checks
w1_reinvited <- w1_reinvited %>% filter(mw_index >= 0.025, mw_index <= 0.975)


# Terciles of mw_index among the reinvited sample, mapped back onto `a`.
w1_reinvited$thirds <- cut(w1_reinvited$mw_index, breaks = quantile(w1_reinvited$mw_index, c(0, 1/3, 2/3, 1)), include.lowest = TRUE, labels = 1:3)
a$thirds <- w1_reinvited$thirds[match(a$worker_id,w1_reinvited$worker_id)]

write_csv(a, "../results/intermediate data/minimum wage (issue 2)/qualtrics_w1_clean.csv")
295
+
296
+
297
# Wave 2 (main survey) =========================================================


# Qualtrics export for the main (post-treatment) survey; drop the two
# header/metadata rows as for wave 1.
w2 <- read_csv("../data/minimum wage (issue 2)/YouTube+Min+Wage+-+Apr+2022+main+survey_July+5,+2022_12.47.csv")[-c(1,2),] %>%
  clean_names() %>%
  select(-thirds) # remove all-NA column

# Elapsed wave-2 survey time from the start/end timestamps.
w2 <- w2 %>% mutate(start_date_w2 = as_datetime(start_date),
                    end_date_w2 = as_datetime(end_date),
                    survey_time_w2 = as.numeric(end_date_w2-start_date_w2))

print('wave 2 survey time:')
summary(w2$survey_time_w2)

# A/V checks: share answering the audio/video screeners correctly.
print('audio ok:')
length(which(w2$q81 == "Quick and easy"))/length(w2$q81)
print('video ok:')
length(which(w2$q82 == "wikiHow"))/length(w2$q82)
315
+
316
+
317
#### Outcomes ####

##### policy opinions ######
# convert to numeric unit scale:
w2 <- w2 %>%
  mutate( # higher = more conservative or anti-min wage (comment fixed: the
          # original said "more pro-gun", copied from the gun-study script)
    minwage15 = dplyr::recode(minwage15,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    rtwa_v1 = dplyr::recode(rtwa_v1, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    rtwa_v2 = dplyr::recode(rtwa_v2, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
    mw_support = dplyr::recode(mw_support, "Strongly oppose raising the minimum wage"=4,"Somewhat oppose raising the minimum wage"=3,"Neither support nor oppose raising the minimum wage"=2,"Somewhat support raising the minimum wage"=1,"Strongly support raising the minimum wage"=0)/4,
    minwage_howhigh = dplyr::recode(minwage_howhigh, "Much lower than the current level"=4,"Somewhat lower than the current level"=3,"About the current level"=2,"Somewhat higher than the current level"=1,"Much higher than the current level"=0)/4,
    mw_help = dplyr::recode(mw_help, "Would hurt low-income workers\n10\n"=9,"9"=8,"8"=7,"7"=6,"6"=5,"5"=4,"4"=3,"3"=2,"2"=1,"Would help low-income workers\n1"=0)/9,
    mw_restrict = dplyr::recode(mw_restrict, "Would restrict businesses' freedom\n1\n"=9,"2"=8,"3"=7,"4"=6,"5"=5,"6"=4,"7"=3,"8"=2,"9"=1,"Would protect workers from exploitation\n10\n"=0)/9,
    minwage_text_r = (25-as.numeric(minwage_text))/25,
  )
# Open-ended dollar amounts above $25 are treated as invalid.
w2$minwage_text_r[as.numeric(w2$minwage_text)>25] <- NA

# Additive opinion index: row mean of the eight unit-scaled items.
w2 <- w2 %>%
  rowwise() %>%
  mutate(mw_index = mean(c(minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), na.rm=T)) %>%
  ungroup()


# Cronbach's alpha
index_fa <- psych::alpha(select(w2, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), check.keys = T)
write.csv(data.frame(cor(select(w2, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), use = "complete.obs")),row.names = T,
          file = "../results/minwage_cormat_mw_index_w2.csv")

pdf("../results/minwage_corrplot_mwindex_w2.pdf")
w2_corrplot <- corrplot::corrplot(cor(select(w2, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r), use = "complete.obs"),method = "shade")
dev.off()

print('wave 2 policy opinion alpha:')
(alpha <- index_fa$total["raw_alpha"])
# Write alpha as a LaTeX fragment ("%" suffix suppresses the trailing newline).
writeLines(as.character(round(alpha,2)),con = "../results/minwage_outcomes_alpha_w2_mturk.tex",sep = "%")

# FACTOR ANALYSIS WITH VARIMAX ROTATION (POST): share of variance on the first
# principal component of the eight items.
pca2 <- psych::principal(select(w2, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help, mw_restrict, minwage_text_r),
                         rotate="varimax",
                         nfactors=1
)
pc2 <- pca2$Vaccounted[2]
writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study2_post.tex",sep = "%")
360
+
361
##### media trust #####
# Unit-scale each wave-2 trust item; higher = more trusting.
w2 <- w2 %>%
  mutate(
    trust_majornews = dplyr::recode(q96_1,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_localnews = dplyr::recode(q96_2,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_social = dplyr::recode(q96_3,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    trust_youtube = dplyr::recode(q96_4,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
    fabricate_majornews = dplyr::recode(q98,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4,
    fabricate_youtube = dplyr::recode(q100_1,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4
  ) %>%
  rowwise() %>%
  # BUG FIX: items wrapped in c().  The original
  #   mean(trust_majornews, trust_localnews, fabricate_majornews, na.rm=T)
  # passed trust_localnews as mean()'s `trim` argument and dropped
  # fabricate_majornews into `...`, so media_trust equalled trust_majornews
  # alone.  mean(c(...)) matches the mw_index construction.
  mutate(media_trust = mean(c(trust_majornews, trust_localnews, fabricate_majornews), na.rm=T)) %>%
  ungroup()
374
+
375
##### affective polarization #####
print('check affpol feeling thermometers:')
# Wave-2 versions of the party-perception items, unit-scaled.
w2 <- w2 %>%
  mutate(
    smart_dems = dplyr::recode(q61, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
    smart_reps = dplyr::recode(q62_1, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
    comfort_dems = dplyr::recode(q92,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
    comfort_reps = dplyr::recode(q94,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
    ft_dems = as.numeric(q90_11),
    ft_reps = as.numeric(q90_12)
  )

write_csv(w2, "../results/intermediate data/minimum wage (issue 2)/qualtrics_w2_clean.csv")


# join to W1 by MT worker ID: left join keeps all wave-1 respondents; wave-2
# rows without a worker_id are excluded before joining.
w12 <- left_join(a, filter(w2,!is.na(worker_id)), by = "worker_id",suffix=c("_w1","_w2"))
names(w12)

# In-party minus out-party differences for wave 2 (pid measured in wave 1;
# NA for pure independents).
w12 <- w12 %>%
  mutate(
    affpol_smart_w2 = case_when(
      pid==-1 ~ smart_dems_w2-smart_reps_w2,
      pid==1 ~ smart_reps_w2-smart_dems_w2
    ),
    affpol_comfort_w2 = case_when(
      pid==-1 ~ comfort_dems_w2-comfort_reps_w2,
      pid==1 ~ comfort_reps_w2-comfort_dems_w2
    ),
    affpol_ft_w2 = case_when(
      pid==-1 ~ ft_dems_w2-ft_reps_w2,
      pid==1 ~ ft_reps_w2-ft_dems_w2
    ))

# Intermediate write; overwritten later in this script after the YTRecs merge.
write_csv(w12, "../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv")
410
+
411
+
412
## YTRecs session data: -------------------------------------------------------

# Platform logs from the YTRecs video app.
ytrecs <- read_rds("../data/minimum wage (issue 2)/min_wage_data.rds") %>%
  clean_names() %>%
  as_tibble()

# Keep MTurk sessions (urlid prefixed "mt_") with non-missing video counts.
ytrecs <- ytrecs %>%
  mutate(duration = end_time2 - start_time2) %>%
  select(topic_id,urlid,pro,anti,duration,pro_up,pro_down,anti_up,anti_down,pro_save,anti_save,start_time2, end_time2) %>%
  filter(str_detect(urlid,"mt_") & !is.na(pro))

# De-duplicate repeated sessions per (topic_id, urlid).
ytrecs <- ytrecs %>%
  group_by(topic_id,urlid) %>%
  mutate(dupes = n(),
         max_duration = ifelse(duration==max(duration),1,0)
  ) %>%
  filter(max_duration==1) # using longest session as valid one

# Interaction counts: missing values mean "no interaction", so recode to 0,
# then build per-respondent totals.
ytrecs <- ytrecs %>%
  mutate(
    pro_up = replace_na(pro_up,0),
    pro_down = replace_na(pro_down,0),
    anti_up = replace_na(anti_up,0),
    anti_down = replace_na(anti_down,0),
    pro_save = replace_na(pro_save,0),
    anti_save = replace_na(anti_save,0)) %>%
  rowwise() %>%
  mutate(total_likes = sum(pro_up,anti_up,na.rm=T),
         total_dislikes = sum(pro_down,anti_down,na.rm=T),
         total_thumbs = sum(pro_up,pro_down,anti_up,anti_down,na.rm=T),
         total_saved = sum(pro_save,anti_save,na.rm=T),
         total_interactions = sum(pro_up,pro_down,anti_up,anti_down,pro_save,anti_save,na.rm=T),
         positive_interactions = total_likes + total_saved - total_dislikes
  )

# Fraction of freely chosen videos that were pro-MW; the seed video (whose
# side is the topic_id suffix) is excluded from numerator and denominator.
ytrecs <- ytrecs %>%
  mutate(seed = str_replace(topic_id,".*_(\\w+)$","\\1")) %>%
  mutate(pro = as.numeric(pro),
         anti = as.numeric(anti)) %>%
  mutate(pro_fraction_chosen = case_when(
    seed=="anti" ~ pro/(pro+anti-1),
    seed=="pro" ~ (pro-1)/(pro+anti-1)
  ))
# adjust for zeros: undefined when no videos were watched at all.
ytrecs$pro_fraction_chosen[ytrecs$pro==0 & ytrecs$anti==0] <- NA


# Extract the session keys from the personalized video link, then merge.
w12 <- w12 %>%
  ungroup() %>%
  mutate(topic_id = str_replace(video_link_w2,".*&topicid=(.*)&allowDupe=1&id=(.*)$","\\1"),
         urlid = str_replace(video_link_w2,".*&topicid=(.*?)&allowDupe=1&id=(.*)$","\\2"),
  )

w12 <- left_join(w12,ytrecs,by=c("topic_id","urlid"))

# One row per worker: keep the earliest session per worker_id.
w12 <- w12 %>%
  arrange(worker_id, start_time2) %>%
  group_by(worker_id) %>%
  slice(1) %>% # Keep first resp
  ungroup()
print("ISSUE 2 NUMBERS:")
print(paste('count w/ valid ytrecs data:', sum(!is.na(w12$pro))))
print(paste('count w/ valid ytrecs interactions:', sum(!is.na(w12$total_thumbs))))
print('interactions:')
summary(w12$total_interactions)


# create numeric dosage version of treatment: 1 = slanted ("3/1") recommender,
# 0 = balanced ("2/2"), NA = pure control; treatment_seed keeps the prefix.
w12 <- w12 %>%
  mutate(treatment_dose = dplyr::recode(treatment_arm,
                                        "anti_31"= 1, "anti_22" = 0,
                                        "pro_31"= 1, "pro_22" = 0,
                                        "control"=NA_real_),
         treatment_seed = str_replace(treatment_arm,"(.*)\\_\\d{2}","\\1")
  )

write_csv(w12, "../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv")
code/minimum wage (issue 2)/02b_clean_merge_yg.R ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Banner so this script's output is identifiable in a combined log.
cat(rep('=', 80),
    '\n\n',
    'OUTPUT FROM: minimum wage (issue 2)/02b_clean_merge_yg.R',
    '\n\n',
    sep = ''
)

library(tidyverse)
library(lubridate)
library(stargazer)
library(haven)
library(janitor)

# YouGov SPSS file with both waves merged ("_W2" suffix = wave-2 variables).
yg <- read_sav("../data/minimum wage (issue 2)/PRIN0016_MERGED_OUTPUT.sav")

## Recodes: elapsed survey time per wave from the start/end timestamps.
yg <- yg %>% mutate(start_date = as_datetime(starttime),
                    end_date = as_datetime(endtime),
                    start_date_w2 = as_datetime(starttime_W2),
                    end_date_w2 = as_datetime(endtime_W2),
                    survey_time = as.numeric(end_date-start_date),
                    survey_time_w2 = as.numeric(end_date_w2-start_date_w2),
)

print('wave 1 survey time')
summary(yg$survey_time)

print('wave 2 survey time')
summary(yg$survey_time_w2)
30
+
31
#### Demographics ####
# 0/1 indicators from YouGov's numeric profile codes.
# NOTE(review): code meanings (gender4==2 female, race==2 Black, educ 5/6
# 4-year/post-grad, faminc_new 6-16 above $50k) follow the YouGov codebook —
# verify against the .sav value labels.
yg <- yg %>%
  mutate(female = ifelse(gender4 == 2, 1, 0),
         male = ifelse(gender4 == 1, 1, 0),
         black = ifelse(race == 2, 1, 0),
         white = ifelse(race == 1, 1, 0),
         college = ifelse(educ == 5 | educ == 6, 1, 0),
         income_gt50k = ifelse(faminc_new >= 6 & faminc_new <= 16, 1, 0)
  )

# PID: -1 = Democrat (incl. leaners on the 7-point scale), 0 = independent,
# 1 = Republican.
yg <- yg %>%
  mutate(pid = case_when(pid3==1 ~ -1,
                         pid3==2 ~ 1,
                         pid7>4 & pid7<8 ~ 1,
                         pid7<4 ~ -1,
                         pid7==4 ~ 0))

# Ideology from the 5-point scale: -1 liberal, 0 moderate, 1 conservative.
yg <- yg %>%
  mutate(ideo = case_when(ideo5<3 ~ -1,
                          ideo5>3 & ideo5<6 ~ 1,
                          ideo5==3 ~ 0))

# Age from birth year; survey fielded in 2022.
yg$age <- 2022 - yg$birthyr

# age categories: 18-29; 30-44; 45-64; 65+
yg <- yg %>%
  mutate(age_cat = case_when(age>=18 & age<=29 ~ "18-29",
                             age>=30 & age<=44 ~ "30-44",
                             age>=45 & age<=64 ~ "45-64",
                             age>=65 ~ "65+"
  ))

yg <- yg %>%
  fastDummies::dummy_cols(select_columns = "age_cat",remove_selected_columns = F)

# Political interest (reversed onto [0, 1]; codes > 4 are non-substantive)
# and YouTube-frequency measures.
yg <- yg %>%
  mutate(pol_interest = ifelse(newsint>4,NA_real_,newsint),
         pol_interest = (4-pol_interest)/3,
         youtube_freq_v2 = ifelse(youtube_freq>10,NA_real_,youtube_freq),
         freq_youtube_v2 = 10-youtube_freq_v2,
         freq_youtube = (Q77-1)
  )
74
+
75
+
76
# Descriptives ------------------------------------------------------------

# Descriptive-statistics table (LaTeX) for the YouGov sample.
descr_data <- as.data.frame(select(yg,
                                   female,
                                   white,
                                   black,
                                   age,
                                   college,
                                   income_gt50k))
# Drop rows that are entirely missing before summarizing.
descr_data <- descr_data %>% filter(rowSums(is.na(.)) != ncol(.))
descriptive_tab <- stargazer(descr_data,
                             summary = T, digits=2,
                             summary.stat=c("mean","sd","median","min","max","n"),
                             covariate.labels = c("Female",
                                                  "White",
                                                  "Black",
                                                  "Age",
                                                  "College educated",
                                                  "Income \\textgreater 50k"),
                             float = F,
                             out = "../results/minwage_descriptive_tab_yg.tex")

# Sample-composition shares for the demographics dot plot.
summary_tab <- yg %>%
  dplyr::summarize(female = mean(female,na.rm=T),
                   white = mean(white,na.rm=T),
                   black = mean(black,na.rm=T),
                   age1829 = mean(`age_cat_18-29`,na.rm=T),
                   age3044 = mean(`age_cat_30-44`,na.rm=T),
                   age4564 = mean(`age_cat_45-64`,na.rm=T),
                   age65p = mean(`age_cat_65+`,na.rm=T),
                   college = mean(college,na.rm=T),
                   income_gt50k = mean(income_gt50k,na.rm=T),
                   democrat = mean(pid==-1,na.rm=T),
                   republican = mean(pid==1,na.rm=T))

summary_tab <- pivot_longer(summary_tab,
                            cols=c(female,
                                   white,
                                   black,
                                   age1829,
                                   age3044,
                                   age4564,
                                   age65p,
                                   college,
                                   income_gt50k,
                                   democrat,
                                   republican),
                            names_to = "outcome",values_to = "survey_avg")
# Human-readable labels for the plot's y axis.
outcome_labels <- data.frame(outcome_pretty = c("Female",
                                                "White",
                                                "Black",
                                                "Age 18-29",
                                                "Age 30-44",
                                                "Age 45-64",
                                                "Age 65+",
                                                "College educated",
                                                "Income >$50k",
                                                "Democrat",
                                                "Republican"),
                             outcome = c("female",
                                         "white",
                                         "black",
                                         "age1829",
                                         "age3044",
                                         "age4564",
                                         "age65p",
                                         "college",
                                         "income_gt50k",
                                         "democrat",
                                         "republican"))
summary_tab$outcome_pretty <- outcome_labels$outcome_pretty[match(summary_tab$outcome,outcome_labels$outcome)]
# Fix the display order of the rows (top of plot = White).
summary_tab <- summary_tab %>%
  mutate(outcome_pretty = factor(outcome_pretty,levels = c("Republican",
                                                           "Democrat",
                                                           "Income >$50k",
                                                           "College educated",
                                                           "Age 65+",
                                                           "Age 45-64",
                                                           "Age 30-44",
                                                           "Age 18-29",
                                                           "Female",
                                                           "Black",
                                                           "White"
  ),ordered=T))

# Dot plot of sample shares; outer parentheses also print it when interactive.
(descrip_fig <- ggplot(summary_tab) +
    geom_point(aes(y=outcome_pretty,x=survey_avg)) +
    geom_text(aes(y=outcome_pretty,x=survey_avg,label=paste0(round(100*survey_avg,0),"%")),nudge_x = 0.1) +
    scale_y_discrete("") +
    scale_x_continuous("",labels=scales::percent_format(),limits=c(0,1)) +
    theme_bw()
)

ggsave(descrip_fig,filename = "../results/minwage_demographics_yg.pdf",height=5,width=4)
170
+
171
+
172
+
173
#### A/V check: share of wave-2 respondents passing the audio/video screeners
#### (code 1 = correct answer), among those who saw the question.
print('audio ok:')
length(which(yg$Q81_W2 == 1))/length(which(!is.na(yg$Q81_W2)))
print('video ok:')
length(which(yg$Q82_W2 == 1))/length(which(!is.na(yg$Q82_W2)))



#### Outcomes ####

##### policy opinions #####
# convert to numeric unit scale (SPSS numeric codes, 1 = most pro-MW):
yg <- yg %>%
  mutate( # higher = more conservative or anti-min wage
    minwage15_w1 = (minwage15-1)/4,
    rtwa_v1_w1 = (RTWA_v1-1)/4,
    rtwa_v2_w1 = (RTWA_v2-1)/4,
    mw_support_w1 = (mw_support-1)/4,
    minwage_howhigh_w1 = (minwage_howhigh-1)/4,
    mw_help_w1 = (mw_help_a-1)/9,
    mw_restrict_w1 = (10-mw_restrict_1)/9,
    minwage_text_r_w1 = (25-as.numeric(minwage_text))/25,
  )

# Additive opinion index: row mean of the eight unit-scaled items.
yg <- yg %>%
  rowwise() %>%
  mutate(mw_index_w1 = mean(c(minwage15_w1, rtwa_v1_w1, rtwa_v2_w1, mw_support_w1, minwage_howhigh_w1, mw_help_w1, mw_restrict_w1, minwage_text_r_w1), na.rm=T)) %>%
  ungroup()

# Cronbach's alpha
index_fa <- psych::alpha(select(yg, minwage15_w1, rtwa_v1_w1, rtwa_v2_w1, mw_support_w1, minwage_howhigh_w1, mw_help_w1, mw_restrict_w1, minwage_text_r_w1), check.keys = TRUE)
write.csv(data.frame(cor(select(yg, minwage15_w1, rtwa_v1_w1, rtwa_v2_w1, mw_support_w1, minwage_howhigh_w1, mw_help_w1, mw_restrict_w1, minwage_text_r_w1), use = "complete.obs")),row.names = T,
          file = "../results/cormat_mwindex_w1_yg.csv")

pdf("../results/corrplot_mwindex_w1_yg.pdf")
w1_corrplot <- corrplot::corrplot(cor(select(yg, minwage15_w1, rtwa_v1_w1, rtwa_v2_w1, mw_support_w1, minwage_howhigh_w1, mw_help_w1, mw_restrict_w1, minwage_text_r_w1), use = "complete.obs"),method = "shade")
dev.off()

# Write alpha as a LaTeX fragment ("%" suffix suppresses the trailing newline).
alpha <- index_fa$total["raw_alpha"]
writeLines(as.character(round(alpha,2)),con = "../results/minwage_outcomes_alpha_w1_yg.tex",sep = "%")

# FACTOR ANALYSIS WITH VARIMAX ROTATION (PRE): share of variance on the first
# principal component of the eight items.
pca2 <- psych::principal(select(yg, minwage15_w1, rtwa_v1_w1, rtwa_v2_w1, mw_support_w1, minwage_howhigh_w1, mw_help_w1, mw_restrict_w1, minwage_text_r_w1),
                         rotate="varimax",
                         nfactors=1
)
pc2 <- pca2$Vaccounted[2]
writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study3_pre.tex",sep = "%")
221
+
222
##### media trust #####
# Unit-scale each trust item (SPSS codes reversed); higher = more trusting.
yg <- yg %>%
  mutate(
    trust_majornews_w1 = (4-Q58_a)/3,
    trust_localnews_w1 = (4-Q58_b)/3,
    trust_social_w1 = (4-Q58_c)/3,
    trust_youtube_w1 = (4-Q58_d)/3,
    fabricate_majornews_w1 = (5-Q89b)/4,
    fabricate_youtube_w1 = (5-Q90)/4
  ) %>%
  rowwise() %>%
  # BUG FIX: items wrapped in c().  The original
  #   mean(trust_majornews_w1, trust_localnews_w1, fabricate_majornews_w1, na.rm=T)
  # passed trust_localnews_w1 as mean()'s `trim` argument and dropped
  # fabricate_majornews_w1 into `...`, so media_trust_w1 equalled
  # trust_majornews_w1 alone.  mean(c(...)) matches the mw_index construction.
  mutate(media_trust_w1 = mean(c(trust_majornews_w1, trust_localnews_w1, fabricate_majornews_w1), na.rm=T)) %>%
  ungroup()

# Reliability of the three-item media-trust index.
media_trust_fa <- psych::alpha(select(yg, trust_majornews_w1,trust_localnews_w1,fabricate_majornews_w1), check.keys = TRUE)
print('media trust alpha:')
media_trust_fa$total["raw_alpha"]
239
+
240
+
241
##### affective polarization #####
# check FTs: feeling thermometers should track respondent party ID.
yg %>%
  group_by(pid) %>%
  summarize(mean_2=mean(as.numeric(Q5_a),na.rm=T), # Trump
            mean_5=mean(as.numeric(Q5_b),na.rm=T), # Biden
            mean_11=mean(as.numeric(Q5_c),na.rm=T), # dems
            mean_12=mean(as.numeric(Q5_d),na.rm=T)) # reps

# Unit-scale the party-perception items; affpol_* variables are in-party minus
# out-party differences (defined only for partisans, NA for pid == 0).
yg <- yg %>%
  mutate(
    smart_dems = (5-Q61)/4,
    smart_reps = (5-Q62)/4,
    comfort_dems = (Q87b-1)/3,
    comfort_reps = (Q88-1)/3,
    ft_dems = as.numeric(Q5_c),
    ft_reps = as.numeric(Q5_d),
    affpol_smart = case_when(
      pid==-1 ~ smart_dems-smart_reps,
      pid==1 ~ smart_reps-smart_dems
    ),
    affpol_comfort = case_when(
      pid==-1 ~ comfort_dems-comfort_reps,
      pid==1 ~ comfort_reps-comfort_dems
    ),
    affpol_ft = case_when(
      pid==-1 ~ ft_dems-ft_reps,
      pid==1 ~ ft_reps-ft_dems
    )
  )
271
+
272
+
273
+
274
+ # W2 ----------------------------------------------------------------------
275
+
276
+ ##### policy opinions #####
277
+ # convert to numeric unit scale:
278
+ yg <- yg %>%
279
+ mutate( # higher = more conservative or anti-min wage
280
+ minwage15_w2 = (minwage15_W2-1)/4,
281
+ rtwa_v1_w2 = (RTWA_v1_W2-1)/4,
282
+ rtwa_v2_w2 = (RTWA_v2_W2-1)/4,
283
+ mw_support_w2 = (mw_support_W2-1)/4,
284
+ minwage_howhigh_w2 = (minwage_howhigh_W2-1)/4,
285
+ mw_help_w2 = (mw_help_a_W2-1)/9,
286
+ mw_restrict_w2 = (10-mw_restrict_1_W2)/9,
287
+ minwage_text_r_w2 = (25-as.numeric(minwage_text_W2))/25,
288
+ )
289
+
290
+ yg <- yg %>%
291
+ rowwise() %>%
292
+ mutate(mw_index_w2 = mean(c(minwage15_w2, rtwa_v1_w2, rtwa_v2_w2, mw_support_w2, minwage_howhigh_w2, mw_help_w2, mw_restrict_w2, minwage_text_r_w2), na.rm=T)) %>%
293
+ ungroup()
294
+
295
+ # Cronbach's alpha
296
+ index_fa <- psych::alpha(select(yg, minwage15_w2, rtwa_v1_w2, rtwa_v2_w2, mw_support_w2, minwage_howhigh_w2, mw_help_w2, mw_restrict_w2, minwage_text_r_w2), check.keys = TRUE)
297
+ write.csv(data.frame(cor(select(yg, minwage15_w2, rtwa_v1_w2, rtwa_v2_w2, mw_support_w2, minwage_howhigh_w2, mw_help_w2, mw_restrict_w2, minwage_text_r_w2), use = "complete.obs")),row.names = T,
298
+ file = "../results/cormat_mwindex_w2_yg.csv")
299
+
300
+ pdf("../results/corrplot_mwindex_w2_yg.pdf")
301
+ w2_corrplot <- corrplot::corrplot(cor(select(yg, minwage15_w2, rtwa_v1_w2, rtwa_v2_w2, mw_support_w2, minwage_howhigh_w2, mw_help_w2, mw_restrict_w2, minwage_text_r_w2), use = "complete.obs"),method = "shade")
302
+ dev.off()
303
+
304
+ print('wave 2 policy opinion alpha:')
305
+ (alpha <- index_fa$total["raw_alpha"])
306
+ writeLines(as.character(round(alpha,2)),con = "../results/minwage_outcomes_alpha_w2_yg.tex",sep = "%")
307
+
308
+ # FACTOR ANALYSIS WITH VARIMAX ROTATION (POST)
309
+ pca2 <- psych::principal(select(yg, minwage15_w2, rtwa_v1_w2, rtwa_v2_w2, mw_support_w2, minwage_howhigh_w2, mw_help_w2, mw_restrict_w2, minwage_text_r_w2),
310
+ rotate="varimax",
311
+ nfactors=1
312
+ )
313
+ pc2 <- pca2$Vaccounted[2]
314
+ writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study3_post.tex",sep = "%")
315
+
316
+ ##### media trust #####
317
+ yg <- yg %>%
318
+ mutate( # higher = more trusting
319
+ trust_majornews_w2 = (4-Q58_a_W2)/3,
320
+ trust_localnews_w2 = (4-Q58_b_W2)/3,
321
+ trust_social_w2 = (4-Q58_c_W2)/3,
322
+ trust_youtube_w2 = (4-Q58_d_W2)/3,
323
+ fabricate_majornews_w2 = (5-Q89b_W2)/4,
324
+ fabricate_youtube_w2 = (5-Q90_W2)/4
325
+ ) %>%
326
+ rowwise() %>%
327
+ mutate(media_trust_w2 = mean(c(trust_majornews_w2,trust_localnews_w2,fabricate_majornews_w2),na.rm=T)) %>%
328
+ ungroup()
329
+
330
+ ##### affective polarization #####
331
+ print('check affpol feeling thermometers:')
332
+ yg %>%
333
+ group_by(pid) %>%
334
+ summarize(mean_2=mean(as.numeric(Q5_a_W2),na.rm=T), # Trump
335
+ mean_5=mean(as.numeric(Q5_b_W2),na.rm=T), # Biden
336
+ mean_11=mean(as.numeric(Q5_c_W2),na.rm=T), # dems
337
+ mean_12=mean(as.numeric(Q5_d_W2),na.rm=T)) # reps
338
+
339
+ yg <- yg %>%
340
+ mutate( # higher = more trusting
341
+ smart_dems_w2 = (5-Q61_W2)/4,
342
+ smart_reps_w2 = (5-Q62_W2)/4,
343
+ comfort_dems_w2 = (Q92_W2-1)/3,
344
+ comfort_reps_w2 = (Q94_W2-1)/3,
345
+ ft_dems_w2 = as.numeric(Q5_c_W2),
346
+ ft_reps_w2 = as.numeric(Q5_d_W2),
347
+ affpol_smart_w2 = case_when(
348
+ pid==-1 ~ smart_dems_w2-smart_reps_w2,
349
+ pid==1 ~ smart_reps_w2-smart_dems_w2
350
+ ),
351
+ affpol_comfort_w2 = case_when(
352
+ pid==-1 ~ comfort_dems_w2-comfort_reps_w2,
353
+ pid==1 ~ comfort_reps_w2-comfort_dems_w2
354
+ ),
355
+ affpol_ft_w2 = case_when(
356
+ pid==-1 ~ ft_dems_w2-ft_reps_w2,
357
+ pid==1 ~ ft_reps_w2-ft_dems_w2
358
+ )
359
+ )
360
+
361
+
362
+ ## YTRecs session data: -------------------------------------------------------
363
+
364
+ ytrecs <- read_rds("../data/minimum wage (issue 2)/min_wage_data.rds") %>%
365
+ clean_names() %>%
366
+ as_tibble()
367
+
368
+ ytrecs <- ytrecs %>%
369
+ mutate(duration = end_time2 - start_time2) %>%
370
+ select(topic_id,urlid,pro,anti,duration,pro_up,pro_down,anti_up,anti_down,pro_save,anti_save,start_time2, end_time2) %>%
371
+ filter(str_detect(urlid,"mt_",negate = T) & !is.na(pro))
372
+
373
+ ytrecs <- ytrecs %>%
374
+ group_by(topic_id,urlid) %>%
375
+ mutate(dupes = n(),
376
+ max_duration = ifelse(duration==max(duration),1,0)
377
+ ) %>%
378
+ filter(max_duration==1) # using longest session as valid one
379
+
380
+ ytrecs <- ytrecs %>%
381
+ mutate(
382
+ pro_up = replace_na(pro_up,0),
383
+ pro_down = replace_na(pro_down,0),
384
+ anti_up = replace_na(anti_up,0),
385
+ anti_down = replace_na(anti_down,0),
386
+ pro_save = replace_na(pro_save,0),
387
+ anti_save = replace_na(anti_save,0)) %>%
388
+ rowwise() %>%
389
+ mutate(total_likes = sum(pro_up,anti_up,na.rm=T),
390
+ total_dislikes = sum(pro_down,anti_down,na.rm=T),
391
+ total_thumbs = sum(pro_up,pro_down,anti_up,anti_down,na.rm=T),
392
+ total_saved = sum(pro_save,anti_save,na.rm=T),
393
+ total_interactions = sum(pro_up,pro_down,anti_up,anti_down,pro_save,anti_save,na.rm=T),
394
+ positive_interactions = total_likes + total_saved - total_dislikes
395
+ )
396
+
397
+ ytrecs <- ytrecs %>%
398
+ mutate(seed = str_replace(topic_id,".*_(\\w+)$","\\1")) %>%
399
+ mutate(pro = as.numeric(pro),
400
+ anti = as.numeric(anti)) %>%
401
+ mutate(pro_fraction_chosen = case_when(
402
+ seed=="anti" ~ pro/(pro+anti-1),
403
+ seed=="pro" ~ (pro-1)/(pro+anti-1)
404
+ ))
405
+ # adjust for zeros:
406
+ ytrecs$pro_fraction_chosen[ytrecs$pro==0 & ytrecs$anti==0] <- NA
407
+
408
+
409
+ yg <- yg %>%
410
+ ungroup() %>%
411
+ mutate(
412
+ urlid = session_visa_W2
413
+ )
414
+
415
+ yg <- left_join(yg,ytrecs,by=c("urlid"))
416
+
417
+ print("ISSUE 2 NUMBERS:")
418
+ print(paste('count w/ valid ytrecs data:', sum(!is.na(yg$pro))))
419
+ print(paste('count w/ valid ytrecs interactions:', sum(!is.na(yg$total_thumbs))))
420
+ print('interactions:')
421
+ summary(yg$total_interactions)
422
+
423
+ # create numeric dosage version of treatment:
424
+ yg <- yg %>%
425
+ mutate(treatment_arm = haven::as_factor(treatment_arm_W2),
426
+ treatment_dose = dplyr::recode(treatment_arm,
427
+ "anti_31"= 1, "anti_22" = 0,
428
+ "pro_31"= 1, "pro_22" = 0,
429
+ "control"=NA_real_),
430
+ treatment_seed = str_replace(treatment_arm,"(.*)\\_\\d{2}","\\1")
431
+ )
432
+
433
+ terciles <- read_csv("../results/intermediate data/minimum wage (issue 2)/yougov_terciles.csv")
434
+ yg <- left_join(yg,select(terciles,caseid,thirds=tercile),by="caseid")
435
+
436
+ write_csv(yg, "../results/intermediate data/minimum wage (issue 2)/yg_w12_clean.csv")
code/minimum wage (issue 2)/03_analysis_multipletesting.R ADDED
@@ -0,0 +1,1293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cat(rep('=', 80),
2
+ '\n\n',
3
+ 'OUTPUT FROM: minimum wage (issue 2)/03_analysis_multipletesting.R',
4
+ '\n\n',
5
+ sep = ''
6
+ )
7
+
8
+ library(data.table)
9
+ library(car)
10
+ library(sandwich)
11
+ library(lmtest)
12
+ library(ggplot2)
13
+ library(assertthat)
14
+ library(foreach)
15
+ library(doParallel)
16
+ registerDoParallel(cores = detectCores() - 1)
17
+
18
+
19
+
20
+ ###############
21
+ ## functions ##
22
+ ###############
23
+
24
+ `%.%` <- paste0
25
+
26
+ simes <- function(ps){
27
+ min(sort(length(ps) * ps / rank(ps)))
28
+ }
29
+
30
+ ### functions to handle inconsistent interaction ordering of mlm() ###
31
+
32
+ ## convert interaction terms of form 'b#:a#' to 'a#:b#'
33
+ reorder.interaction.names <- function(x, prefix = ''){
34
+ x <- gsub('^' %.% prefix, '', x)
35
+ sapply(strsplit(x, ':'),
36
+ function(y){
37
+ paste(sort(y), collapse = ':')
38
+ })
39
+ }
40
+
41
+ ## take term of form 'a1:b1', look up in vector of form 'b#:a#, return 'b1:a1'
42
+ convert.interaction.names <- function(x, y, prefix.y = ''){
43
+ ind <- match(reorder.interaction.names(x),
44
+ reorder.interaction.names(y, prefix = prefix.y)
45
+ )
46
+ return(y[ind])
47
+ }
48
+
49
+ ## modified from print.linearHypothesis.mlm to use alternate df & return pvals
50
+ ## (print method is responsible for doing the actual computation of pvals)
51
+ extract.lht <- function(x,
52
+ SSP = TRUE,
53
+ SSPE = SSP,
54
+ digits = getOption('digits'),
55
+ df.residual = x$df.residual
56
+ ){
57
+ test <- x$test
58
+ if (!is.null(x$P) && SSP) {
59
+ P <- x$P
60
+ cat("\n Response transformation matrix:\n")
61
+ attr(P, "assign") <- NULL
62
+ attr(P, "contrasts") <- NULL
63
+ print(P, digits = digits)
64
+ }
65
+ if (SSP) {
66
+ cat("\nSum of squares and products for the hypothesis:\n")
67
+ print(x$SSPH, digits = digits)
68
+ }
69
+ if (SSPE) {
70
+ cat("\nSum of squares and products for error:\n")
71
+ print(x$SSPE, digits = digits)
72
+ }
73
+ if ((!is.null(x$singular)) && x$singular) {
74
+ warning("the error SSP matrix is singular; multivariate tests are unavailable")
75
+ return(invisible(x))
76
+ }
77
+ SSPE.qr <- qr(x$SSPE)
78
+ eigs <- Re(eigen(qr.coef(SSPE.qr, x$SSPH), symmetric = FALSE)$values)
79
+ tests <- matrix(NA, 4, 4)
80
+ rownames(tests) <- c("Pillai", "Wilks", "Hotelling-Lawley",
81
+ "Roy")
82
+ if ("Pillai" %in% test)
83
+ tests[1, 1:4] <- car:::Pillai(eigs, x$df, df.residual)
84
+ if ("Wilks" %in% test)
85
+ tests[2, 1:4] <- car:::Wilks(eigs, x$df, df.residual)
86
+ if ("Hotelling-Lawley" %in% test)
87
+ tests[3, 1:4] <- car:::HL(eigs, x$df, df.residual)
88
+ if ("Roy" %in% test)
89
+ tests[4, 1:4] <- car:::Roy(eigs, x$df, df.residual)
90
+ tests <- na.omit(tests)
91
+ ok <- tests[, 2] >= 0 & tests[, 3] > 0 & tests[, 4] > 0
92
+ ok <- !is.na(ok) & ok
93
+ tests <- cbind(x$df, tests, pf(tests[ok, 2], tests[ok, 3],
94
+ tests[ok, 4], lower.tail = FALSE))
95
+ colnames(tests) <- c("Df", "test stat", "approx F", "num Df",
96
+ "den Df", "Pr(>F)")
97
+ tests <- structure(as.data.frame(tests),
98
+ heading = paste("\nMultivariate Test",
99
+ if (nrow(tests) > 1)
100
+ "s", ": ", x$title, sep = ""),
101
+ class = c("anova",
102
+ "data.frame"
103
+ )
104
+ )
105
+ return(tests)
106
+ }
107
+
108
+
109
+
110
+ ###############
111
+ ## load data ##
112
+ ###############
113
+
114
+ d <- fread('../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv')
115
+
116
+ ## drop pure control
117
+ d <- d[treatment_arm != 'control',]
118
+
119
+ ## drop NA video counts
120
+ d <- d[!is.na(pro) & !is.na(anti),]
121
+
122
+
123
+
124
+ ##############
125
+ ## controls ##
126
+ ##############
127
+
128
+ platform.controls <- c('age_cat',
129
+ 'male',
130
+ 'pol_interest',
131
+ 'freq_youtube'
132
+ )
133
+
134
+ mwpolicy.controls <- 'mw_index_w1'
135
+
136
+ media.controls <- c('trust_majornews_w1',
137
+ 'trust_youtube_w1',
138
+ 'fabricate_majornews_w1',
139
+ 'fabricate_youtube_w1'
140
+ )
141
+
142
+ affpol.controls <- c('affpol_ft',
143
+ 'affpol_smart',
144
+ 'affpol_comfort'
145
+ )
146
+
147
+ controls.raw <- unique(c(platform.controls,
148
+ mwpolicy.controls,
149
+ media.controls,
150
+ affpol.controls
151
+ )
152
+ )
153
+
154
+ ## transform control variables by creating dummies and demeaning
155
+ controls.trans <- list()
156
+ for (j in controls.raw){
157
+ ## convert to dummies if needed
158
+ controls.j <- model.matrix(as.formula('~ 0 + ' %.% j),
159
+ model.frame(as.formula('~ 0 + ' %.% j),
160
+ data = d,
161
+ na.action = 'na.pass'
162
+ )
163
+ )
164
+ ## demean by column
165
+ controls.j <- sweep(controls.j,
166
+ MARGIN = 2,
167
+ STATS = colMeans(controls.j, na.rm = TRUE),
168
+ FUN = `-`,
169
+ )
170
+ colnames(controls.j) <- make.names(colnames(controls.j))
171
+ ## remove control from original data
172
+ d[[j]] <- NULL
173
+ ## reinsert transformed control
174
+ d <- cbind(d, controls.j)
175
+ ## keep track of which original controls map to which transformed controls
176
+ controls.trans[[j]] <- colnames(controls.j)
177
+ }
178
+
179
+ ## map original control variables to transformed versions
180
+ platform.controls <- unlist(controls.trans[platform.controls])
181
+ mwpolicy.controls <- unlist(controls.trans[mwpolicy.controls])
182
+ media.controls <- unlist(controls.trans[media.controls])
183
+ affpol.controls <- unlist(controls.trans[affpol.controls])
184
+
185
+
186
+
187
+ ##############
188
+ ## outcomes ##
189
+ ##############
190
+
191
+ ### hypothesis family 1: platform interactions ###
192
+
193
+ ## platform interaction time: compute windorized usage time
194
+ warning('diverges from pap, 95% windsorized due to extreme outliers')
195
+ d[, platform_duration := duration]
196
+ d[platform_duration <= quantile(d$duration, .025),
197
+ platform_duration := quantile(d$duration, .025)
198
+ ]
199
+ d[platform_duration >= quantile(d$duration, .975),
200
+ platform_duration := quantile(d$duration, .975)
201
+ ]
202
+ ## all platform interaction outcomes
203
+ platform.outcomes <- c('pro_fraction_chosen',
204
+ 'positive_interactions', # positive - negative (dislike)
205
+ 'platform_duration'
206
+ )
207
+
208
+
209
+
210
+ ### hypothesis family 2: MW policy attitudes ###
211
+
212
+ ## only one preregistered outcome in this family
213
+ mwpolicy.outcomes <- 'mw_index_w2'
214
+ ## added 4 jun 2024 at request of reviewers
215
+ mwpolicy.outcomes.understanding <- c('mw_restrict_w2',
216
+ 'mw_help_w2'
217
+ )
218
+
219
+
220
+
221
+ ### hypothesis family 3: media trust ###
222
+ media.outcomes <- c('trust_majornews_w2',
223
+ 'trust_youtube_w2',
224
+ 'fabricate_majornews_w2',
225
+ 'fabricate_youtube_w2'
226
+ )
227
+
228
+
229
+
230
+ ### hypothesis family 4: affective polarization ###
231
+ affpol.outcomes <- c('affpol_ft_w2',
232
+ 'affpol_smart_w2',
233
+ 'affpol_comfort_w2'
234
+ )
235
+
236
+ outcomes <- unique(c(
237
+ platform.outcomes,
238
+ mwpolicy.outcomes,
239
+ media.outcomes,
240
+ affpol.outcomes
241
+ )
242
+ )
243
+
244
+
245
+
246
+ ################
247
+ ## treatments ##
248
+ ################
249
+
250
+ ## create attitude dummies
251
+ ## (pro/anti stance on issue has opposite lib/con meaning from study 1)
252
+ d[, attitude := c('pro', 'neutral', 'anti')[thirds]]
253
+ d[, attitude.anti := as.numeric(attitude == 'anti')]
254
+ d[, attitude.neutral := as.numeric(attitude == 'neutral')]
255
+ d[, attitude.pro := as.numeric(attitude == 'pro')]
256
+
257
+ ## create seed dummies
258
+ d[, seed.anti := as.numeric(treatment_seed == 'anti')]
259
+ d[, seed.pro := as.numeric(treatment_seed == 'pro')]
260
+
261
+ ## create recsys dummies
262
+ d[, recsys.22 := as.numeric(treatment_arm %like% '22')]
263
+ d[, recsys.31 := as.numeric(treatment_arm %like% '31')]
264
+
265
+ ## manually define coefficients to estimate
266
+ treatments <- c('attitude.anti:recsys.22',
267
+ 'attitude.anti:recsys.31',
268
+ 'attitude.neutral:seed.anti:recsys.22',
269
+ 'attitude.neutral:seed.pro:recsys.22',
270
+ 'attitude.neutral:seed.anti:recsys.31',
271
+ 'attitude.neutral:seed.pro:recsys.31',
272
+ 'attitude.pro:recsys.22',
273
+ 'attitude.pro:recsys.31'
274
+ )
275
+
276
+ contrasts <- rbind(
277
+ i = c(treat = 'attitude.pro:recsys.31',
278
+ ctrl = 'attitude.pro:recsys.22'
279
+ ),
280
+ ii = c(treat = 'attitude.anti:recsys.31',
281
+ ctrl = 'attitude.anti:recsys.22'
282
+ ),
283
+ iii = c(treat = 'attitude.neutral:seed.pro:recsys.31',
284
+ ctrl = 'attitude.neutral:seed.pro:recsys.22'
285
+ ),
286
+ iv = c(treat = 'attitude.neutral:seed.anti:recsys.31',
287
+ ctrl = 'attitude.neutral:seed.anti:recsys.22'
288
+ ),
289
+ # in (v-vi), pro/anti order is reversed from study 1 to ensure that
290
+ # - 1st condition (treatment) is always the conservative video
291
+ # - 2nd condition (control) is always the liberal video
292
+ v = c(treat = 'attitude.neutral:seed.anti:recsys.31',
293
+ ctrl = 'attitude.neutral:seed.pro:recsys.31'
294
+ ),
295
+ vi = c(treat = 'attitude.neutral:seed.anti:recsys.22',
296
+ ctrl = 'attitude.neutral:seed.pro:recsys.22'
297
+ )
298
+ )
299
+
300
+ ## check that contrasts are valid
301
+ assert_that(all(unlist(contrasts) %in% treatments))
302
+
303
+ ## check that specifications are equivalent
304
+ coefs.v1 <- coef(lm(mw_index_w2 ~ 0 + attitude:treatment_arm, d))
305
+ coefs.v2 <- coef(
306
+ lm(mw_index_w2 ~
307
+ 0 +
308
+ attitude.anti:recsys.22 +
309
+ attitude.anti:recsys.31 +
310
+ attitude.neutral:seed.anti:recsys.22 +
311
+ attitude.neutral:seed.pro:recsys.22 +
312
+ attitude.neutral:seed.anti:recsys.31 +
313
+ attitude.neutral:seed.pro:recsys.31 +
314
+ attitude.pro:recsys.22 +
315
+ attitude.pro:recsys.31,
316
+ d
317
+ )
318
+ )
319
+ assert_that(all.equal(unname(sort(coefs.v1)), unname(sort(coefs.v2))))
320
+
321
+ ##########################
322
+ ## hierarchical testing ##
323
+ ##########################
324
+
325
+ ## initialize top layer p-values:
326
+ ## does treatment have any effect on any outcome in family
327
+ families <- c(
328
+ 'platform',
329
+ 'mwpolicy',
330
+ 'media',
331
+ 'affpol'
332
+ )
333
+ layer1.pvals <- rep(NA_real_, length(families))
334
+ layer1.notes <- rep('', length(families))
335
+ names(layer1.pvals) <- families
336
+
337
+ ## initialize 2nd layer p-values:
338
+ ## which treatment has detectable effect?
339
+ contrast.pvals <- rep(NA_real_, nrow(contrasts))
340
+ names(contrast.pvals) <- paste(contrasts[, 'treat'],
341
+ contrasts[, 'ctrl'],
342
+ sep = '.vs.'
343
+ )
344
+ layer2.pvals <- list(
345
+ platform = contrast.pvals,
346
+ mwpolicy = contrast.pvals,
347
+ media = contrast.pvals,
348
+ affpol = contrast.pvals
349
+ )
350
+ rm(contrast.pvals)
351
+
352
+ ## initialize 3rd layer p-values:
353
+ ## on which specific outcome in family?
354
+ layer3.pvals <- list()
355
+ layer3.ests <- list()
356
+ layer3.ses <- list()
357
+ layer3.notes <- list()
358
+ for (i in 1:length(families)){
359
+ family <- families[i]
360
+ layer3.pvals[[family]] <- list()
361
+ layer3.ests[[family]] <- list()
362
+ layer3.ses[[family]] <- list()
363
+ layer3.notes[[family]] <- list()
364
+ outcomes <- get(family %.% '.outcomes')
365
+ for (j in 1:nrow(contrasts)){
366
+ contrast <- paste(contrasts[j, 'treat'],
367
+ contrasts[j, 'ctrl'],
368
+ sep = '.vs.'
369
+ )
370
+ layer3.pvals[[family]][[contrast]] <- numeric(0)
371
+ layer3.ests[[family]][[contrast]] <- numeric(0)
372
+ layer3.ses[[family]][[contrast]] <- numeric(0)
373
+ for (k in 1:length(outcomes)){
374
+ outcome <- outcomes[k]
375
+ layer3.pvals[[family]][[contrast]][outcome] <- NA_real_
376
+ layer3.ests[[family]][[contrast]][outcome] <- NA_real_
377
+ layer3.ses[[family]][[contrast]][outcome] <- NA_real_
378
+ layer3.notes[[family]][outcome] <- ''
379
+ }
380
+ }
381
+ }
382
+
383
+
384
+
385
+ ### begin nested analyses ###
386
+
387
+ for (i in 1:length(families)){
388
+
389
+ family <- families[i]
390
+ family.outcomes <- get(family %.% '.outcomes')
391
+ family.controls <- get(family %.% '.controls')
392
+ family.controls.interactions <- as.character(
393
+ outer(treatments,
394
+ family.controls,
395
+ FUN = function(x, y) x %.% ':' %.% y
396
+ )
397
+ )
398
+
399
+ family.formula <-
400
+ 'cbind(' %.% # outcomes
401
+ paste(family.outcomes,
402
+ collapse = ', '
403
+ ) %.% ') ~\n0 +\n' %.%
404
+ paste(treatments, # treatments (base terms)
405
+ collapse = ' +\n'
406
+ ) %.% ' +\n' %.%
407
+ paste(family.controls, # controls (base terms)
408
+ collapse = ' +\n'
409
+ )
410
+
411
+ cat(rep('=', 80),
412
+ '\n\nHYPOTHESIS FAMILY: ',
413
+ family,
414
+ '\n\nrunning mlm:\n\n',
415
+ family.formula,
416
+ '\n\n',
417
+ sep = ''
418
+ )
419
+
420
+ ## run model
421
+ family.mod <- lm(family.formula, d)
422
+ ## hack to eliminate NA coefs
423
+ if (any(is.na(coef(family.mod)))){
424
+ if ('mlm' %in% class(family.mod)){
425
+ drop <- rownames(coef(family.mod))[is.na(coef(family.mod))[, 1]]
426
+ } else {
427
+ drop <- names(coef(family.mod))[is.na(coef(family.mod))]
428
+ }
429
+ drop <- convert.interaction.names(drop,
430
+ c(family.controls,
431
+ family.controls.interactions
432
+ )
433
+ )
434
+ layer1.notes[[i]] <-
435
+ layer1.notes[[i]] %.%
436
+ 'dropped the following coefs: ' %.%
437
+ paste(drop, collapse = ', ') %.%
438
+ '\n\n'
439
+ family.formula <- gsub(
440
+ '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
441
+ '',
442
+ family.formula
443
+ )
444
+ family.mod <- lm(family.formula, d)
445
+ }
446
+
447
+ family.vcov <- vcovHC(family.mod)
448
+ if (is.null(dim(coef(family.mod)))){
449
+ coef.names <- names(coef(family.mod))
450
+ } else {
451
+ coef.names <- rownames(coef(family.mod))
452
+ }
453
+
454
+ ### top layer: test overall significance of all contrasts on all outcomes ###
455
+ ## convert interaction terms to whatever mlm() named it
456
+ treats <- convert.interaction.names(contrasts[, 'treat'], coef.names)
457
+ ctrls <- convert.interaction.names(contrasts[, 'ctrl'], coef.names)
458
+ ## test jointly
459
+ lht.attempt <- tryCatch({
460
+ if ('mlm' %in% class(family.mod)){
461
+ contrast.lht <- linearHypothesis(
462
+ family.mod,
463
+ vcov. = family.vcov,
464
+ hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
465
+ rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
466
+ test = 'Pillai'
467
+ )
468
+ layer1.pvals[[i]] <- extract.lht(contrast.lht)[, 'Pr(>F)']
469
+ } else {
470
+ contrast.lht <- linearHypothesis(
471
+ family.mod,
472
+ vcov. = family.vcov,
473
+ hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
474
+ rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
475
+ test = 'F'
476
+ )
477
+ layer1.pvals[[i]] <- contrast.lht[['Pr(>F)']][2]
478
+ }
479
+ },
480
+ error = function(e){
481
+ warning(sprintf('caught error in %s family: %s', family, conditionMessage(e)))
482
+ ## return error as string for inclusion in notes
483
+ 'caught error: ' %.%
484
+ e %.%
485
+ '\n\n'
486
+ })
487
+ if (lht.attempt %like% 'caught error'){
488
+ layer1.notes[[i]] <-
489
+ layer1.notes[[i]] %.% lht.attempt
490
+ }
491
+
492
+
493
+
494
+ ### layer 2: test each contrast individually on all outcomes ###
495
+
496
+ for (j in 1:nrow(contrasts)){
497
+ ## test group equality on all outcomes
498
+ if ('mlm' %in% class(family.mod)){
499
+ contrast.lht <-
500
+ linearHypothesis(
501
+ family.mod,
502
+ vcov. = family.vcov,
503
+ hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
504
+ rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
505
+ test = 'Pillai'
506
+ )
507
+ layer2.pvals[[i]][j] <- extract.lht(contrast.lht)[, 'Pr(>F)']
508
+ } else {
509
+ contrast.lht <- linearHypothesis(
510
+ family.mod,
511
+ vcov. = family.vcov,
512
+ hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
513
+ rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
514
+ test = 'F'
515
+ )
516
+ layer2.pvals[[i]][j] <- contrast.lht[['Pr(>F)']][2]
517
+ }
518
+ }
519
+
520
+ ### layer 3: test each contrast on each outcome individually ###
521
+
522
+ for (k in 1:length(family.outcomes)){
523
+
524
+ outcome <- family.outcomes[k]
525
+
526
+ outcome.formula <-
527
+ outcome %.% ' ~\n0 +\n' %.%
528
+ paste(treatments, # treatments (base terms)
529
+ collapse = ' +\n'
530
+ ) %.% ' +\n' %.%
531
+ paste(family.controls, # controls (base terms)
532
+ collapse = ' +\n'
533
+ )
534
+
535
+ cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')
536
+
537
+ outcome.mod <- lm(outcome.formula, d)
538
+ ## hack to eliminate NA coefs
539
+ if (any(is.na(coef(outcome.mod)))){
540
+ drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
541
+ drop <- convert.interaction.names(drop,
542
+ c(family.controls,
543
+ family.controls.interactions
544
+ )
545
+ )
546
+ layer3.notes[[i]][k] <-
547
+ layer3.notes[[i]][k] %.%
548
+ 'dropped the following coefs: ' %.%
549
+ paste(drop, collapse = ', ') %.%
550
+ '\n\n'
551
+ outcome.formula <- gsub(
552
+ '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
553
+ '',
554
+ outcome.formula
555
+ )
556
+ outcome.mod <- lm(outcome.formula, d)
557
+ }
558
+
559
+ outcome.vcov <- vcovHC(outcome.mod)
560
+ if (any(!is.finite(outcome.vcov))){
561
+ outcome.vcov <- vcov(outcome.mod)
562
+ layer3.notes[[i]][k] <-
563
+ layer3.notes[[i]][k] %.%
564
+ 'falling back to non-robust vcov\n\n'
565
+ }
566
+ coef.names <- names(coef(outcome.mod))
567
+
568
+ for (j in 1:nrow(contrasts)){
569
+
570
+ ## convert this interaction term to whatever llm() named it
571
+ treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
572
+ ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
573
+ ## test group equality on this outcome
574
+ contrast.lht <- linearHypothesis(
575
+ outcome.mod,
576
+ vcov. = outcome.vcov,
577
+ hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
578
+ test = 'F'
579
+ )
580
+ layer3.pvals[[i]][[j]][k] <- contrast.lht[['Pr(>F)']][2]
581
+ layer3.ests[[i]][[j]][k] <- (
582
+ coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
583
+ ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
584
+ layer3.ses[[i]][[j]][k] <- sqrt(
585
+ outcome.vcov[treat, treat] +
586
+ outcome.vcov[ctrl, ctrl] -
587
+ 2 * outcome.vcov[treat, ctrl]
588
+ )
589
+
590
+ ## ## confirm
591
+ ## linearHypothesis(
592
+ ## outcome.mod,
593
+ ## vcov. = outcome.vcov,
594
+ ## hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
595
+ ## test = 'F'
596
+ ## )
597
+ ## (coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl])^2 /
598
+ ## (
599
+ ## outcome.vcov[treat, treat] +
600
+ ## outcome.vcov[ctrl, ctrl] -
601
+ ## 2 * outcome.vcov[treat, ctrl]
602
+ ## )
603
+ ## linearHypothesis(
604
+ ## outcome.mod,
605
+ ## vcov. = outcome.vcov,
606
+ ## hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
607
+ ## test = 'Chisq'
608
+ ## )
609
+ ## 2 - 2 * pnorm(abs(
610
+ ## (coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]) /
611
+ ## sqrt(
612
+ ## outcome.vcov[treat, treat] +
613
+ ## outcome.vcov[ctrl, ctrl] -
614
+ ## 2 * outcome.vcov[treat, ctrl]
615
+ ## )
616
+ ## ))
617
+
618
+ }
619
+
620
+ }
621
+
622
+ }
623
+
624
+
625
+
626
+ #################################
627
+ ## multiple testing correction ##
628
+ #################################
629
+
630
+ thresh <- .05
631
+
632
+ ## if layer-1 f-test is infeasible for a family due to collinearity,
633
+ ## obtain layer-1 p-values for that family by simes
634
+ for (i in which(is.na(layer1.pvals))){
635
+ layer1.pvals[i] <- simes(layer2.pvals[[i]])
636
+ }
637
+
638
+ ## multiple testing adjustment for layer 1
639
+ layer1.pvals.adj <- p.adjust(layer1.pvals, 'BH')
640
+ layer1.nonnull.prop <- mean(layer1.pvals.adj < thresh)
641
+
642
+ ## test layer-2 hypotheses only if layer 1 passes
643
+ layer2.pvals.adj <- layer2.pvals # start by copying unadjusted layer-2 p-values
644
+ layer2.nonnull.prop <- rep(NA, length(layer1.pvals.adj))
645
+ names(layer2.nonnull.prop) <- names(layer1.pvals.adj)
646
+ for (i in 1:length(layer1.pvals)){
647
+ if (layer1.pvals.adj[i] < thresh){ # if layer 1 passes
648
+ ## adjust for multiplicity within layer 2...
649
+ layer2.pvals.adj[[i]] <- p.adjust(layer2.pvals[[i]], 'BH')
650
+ ## ... and inflate to account for selection at layer 1
651
+ layer2.pvals.adj[[i]] <-
652
+ pmin(layer2.pvals.adj[[i]] / layer1.nonnull.prop, 1)
653
+ ## keep track of selection at layer 2 for use in layer 3
654
+ layer2.nonnull.prop[i] <- mean(layer2.pvals.adj[[i]] < thresh)
655
+ } else { # if layer 1 fails
656
+ layer2.pvals.adj[[i]] <- rep(NA_real_, length(layer2.pvals[[i]]))
657
+ names(layer2.pvals.adj[[i]]) <- names(layer2.pvals[[i]])
658
+ }
659
+ }
660
+
661
+ ## test layer-3 hypotheses only if layers 1 & 2 pass
662
+ layer3.pvals.adj <- layer3.pvals # start by copying unadjusted layer-3 p-values
663
+ for (i in 1:length(layer1.pvals.adj)){
664
+ for (j in 1:length(layer2.pvals.adj[[i]])){
665
+ ##
666
+ if (layer1.pvals.adj[i] < thresh && # if layer 1 passes...
667
+ layer2.pvals.adj[[i]][j] < thresh # ... and if layer 2 passes
668
+ ){
669
+ ## adjust for multiplicity within layer 3...
670
+ layer3.pvals.adj[[i]][[j]] <- p.adjust(layer3.pvals[[i]][[j]], 'BH')
671
+ ## ... and inflate to account for selection at layer 1
672
+ layer3.pvals.adj[[i]][[j]] <- pmin(
673
+ layer3.pvals.adj[[i]][[j]] / layer1.nonnull.prop / layer2.nonnull.prop[i],
674
+ 1
675
+ )
676
+ } else {
677
+ layer3.pvals.adj[[i]][[j]] <- rep(NA_real_, length(layer3.pvals[[i]][[j]]))
678
+ names(layer3.pvals.adj[[i]][[j]]) <- names(layer3.pvals[[i]][[j]])
679
+ }
680
+ }
681
+ }
682
+
683
## flatten the three-layer adjusted p-value structure into one long table:
## one row per layer-1 family ('overall'/'overall'), per layer-2 contrast
## ('overall' layer 3), and per layer-3 specific outcome (with est & se).
## NOTE(review): rbind inside nested loops is quadratic in rows, but the
## table here is small enough that this is harmless.
pvals.adj <- data.table(layer1 = character(0),
                        layer2 = character(0),
                        layer3 = character(0),
                        p.adj = numeric(0),
                        est = numeric(0),
                        se = numeric(0)
                        )
for (i in 1:length(layer1.pvals.adj)){
  ## layer-1 (family-level) row: no single estimate/se applies
  pvals.adj <- rbind(pvals.adj,
                     data.table(layer1 = names(layer1.pvals.adj)[i],
                                layer2 = 'overall',
                                layer3 = 'overall',
                                p.adj = layer1.pvals.adj[i],
                                est = NA_real_,
                                se = NA_real_
                                )
                     )
  for (j in 1:length(layer2.pvals.adj[[i]])){
    ## layer-2 (treatment-contrast) row, pooled over outcomes
    pvals.adj <- rbind(pvals.adj,
                       data.table(layer1 = names(layer1.pvals.adj)[i],
                                  layer2 = names(layer2.pvals.adj[[i]])[j],
                                  layer3 = 'overall',
                                  p.adj = layer2.pvals.adj[[i]][j],
                                  est = NA_real_,
                                  se = NA_real_
                                  )
                       )
    for (k in 1:length(layer3.pvals.adj[[i]][[j]])){
      ## layer-3 (specific outcome) row, with its point estimate and se
      pvals.adj <- rbind(pvals.adj,
                         data.table(layer1 = names(layer1.pvals.adj)[i],
                                    layer2 = names(layer2.pvals.adj[[i]])[j],
                                    layer3 = names(layer3.pvals.adj[[i]][[j]])[k],
                                    p.adj = layer3.pvals.adj[[i]][[j]][k],
                                    est = layer3.ests[[i]][[j]][k],
                                    se = layer3.ses[[i]][[j]][k]
                                    )
                         )
    }
  }
}
723
+
724
## write out
fwrite(pvals.adj, '../results/intermediate data/minimum wage (issue 2)/padj_basecontrol.csv')

## prettify for reading
## NOTE(review): pvals.adj.pretty <- pvals.adj is a data.table shallow copy;
## the colnames<- replacements below appear to trigger a full copy, so
## pvals.adj itself presumably keeps its original names — verify.
pvals.adj.pretty <- pvals.adj
colnames(pvals.adj.pretty) <- gsub('layer1',
                                   'layer1_hypothesisfamily',
                                   colnames(pvals.adj.pretty)
                                   )
colnames(pvals.adj.pretty) <- gsub('layer2',
                                   'layer2_treatmentcontrast',
                                   colnames(pvals.adj.pretty)
                                   )
colnames(pvals.adj.pretty) <- gsub('layer3',
                                   'layer3_specificoutcome',
                                   colnames(pvals.adj.pretty)
                                   )
## turn machine names like 'attitude.pro:seed.anti:recsys.31' into
## readable labels, e.g. 'pro anti 31'
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
                     'attitude\\.(pro|anti|neutral)(:seed\\.(pro|anti))?:recsys.(31|22)',
                     '\\1 \\3 \\4',
                     layer2_treatmentcontrast
                   )]
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
                     '.vs.',
                     ' - ',
                     layer2_treatmentcontrast,
                     fixed = TRUE
                   )]
## collapse runs of spaces left behind by the optional seed group
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
                     ' +',
                     ' ',
                     layer2_treatmentcontrast
                   )]
fwrite(pvals.adj.pretty,
       '../results/intermediate data/minimum wage (issue 2)/padj_basecontrol_pretty.csv'
       )

print('preregistered results:')
pvals.adj.pretty[p.adj < .05 & layer3_specificoutcome != 'overall',]
763
+
764
+
765
+
766
##############################################
## added 4 jun 2024 at request of reviewers ##
##############################################

## analyze components of main policy outcome index that relate to
## post-experiment w2 "understanding" of an issue, using w1 version
## of that same outcome as the only control (analogous to outcome index
## regression, which uses w2 index as outcome and w1 index as control)

## initialize results table
understanding.results <- data.table(layer2_treatmentcontrast = character(0),
                                    layer3_specificoutcome = character(0),
                                    est = numeric(0),
                                    se = numeric(0),
                                    p = numeric(0)
                                    )

## loop over outcomes
for (k in 1:length(mwpolicy.outcomes.understanding)){

  outcome <- mwpolicy.outcomes.understanding[k]

  ## build formula: w2 outcome ~ 0 + all treatment cells + w1 outcome
  outcome.formula <-
    outcome %.% ' ~\n0 +\n' %.%
    paste(treatments, # treatments (base terms)
          collapse = ' +\n'
          ) %.% ' +\n' %.%
    paste(gsub('w2', 'w1', outcome), # controls (w1 outcome)
          collapse = ' +\n'
          )

  cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')

  outcome.mod <- lm(outcome.formula, d)
  ## hack to eliminate NA coefs
  ## NOTE(review): this branch looks copy-pasted from the main layer-3 loop —
  ## it references family.controls / family.controls.interactions and writes
  ## to layer3.notes[[i]][k] where i is a stale index from an earlier loop;
  ## also paste(drop, sep = ', ') likely intends collapse = ', '. Confirm
  ## whether this branch is ever reached in this section.
  if (any(is.na(coef(outcome.mod)))){
    drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
    drop <- convert.interaction.names(drop,
                                      c(family.controls,
                                        family.controls.interactions
                                        )
                                      )
    layer3.notes[[i]][k] <-
      layer3.notes[[i]][k] %.%
      'dropped the following coefs: ' %.%
      paste(drop, sep = ', ') %.%
      '\n\n'
    outcome.formula <- gsub(
      '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
      '',
      outcome.formula
    )
    outcome.mod <- lm(outcome.formula, d)
  }

  ## heteroskedasticity-robust vcov; fall back to classical if degenerate
  outcome.vcov <- vcovHC(outcome.mod)
  if (any(!is.finite(outcome.vcov))){
    outcome.vcov <- vcov(outcome.mod)
    layer3.notes[[i]][k] <-
      layer3.notes[[i]][k] %.%
      'falling back to non-robust vcov\n\n'
  }
  coef.names <- names(coef(outcome.mod))

  ## loop over treatment contrasts
  for (j in 1:nrow(contrasts)){

    ## convert this interaction term to whatever lm() named it
    treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
    ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
    ## test group equality on this outcome
    contrast.lht <- linearHypothesis(
      outcome.mod,
      vcov. = outcome.vcov,
      hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
      test = 'F'
    )

    ## prettify name of contrast for readability
    contrast <- treat %.% ' - ' %.% ctrl
    contrast <- gsub('attitude\\.(pro|anti|neutral)', '\\1', contrast)
    contrast <- gsub('seed\\.(pro|anti)', '\\1', contrast)
    contrast <- gsub('recsys.(31|22)', '\\1', contrast)
    contrast <- gsub(':', ' ', contrast)
    contrast <- gsub(' +', ' ', contrast)

    ## p-value from the F test; estimate and se from the coefficient contrast
    p <- contrast.lht[['Pr(>F)']][2]
    est <- (
      coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
    ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
    se <- sqrt(
      outcome.vcov[treat, treat] +
      outcome.vcov[ctrl, ctrl] -
      2 * outcome.vcov[treat, ctrl]
    )

    understanding.results <- rbind(
      understanding.results,
      data.table(
        layer2_treatmentcontrast = contrast,
        layer3_specificoutcome = outcome,
        p,
        est,
        se
      )
    )

  }

}

## conduct multiple testing adjustment within newly exploratory results
understanding.results[, p.adj := p.adjust(p, 'BH')]
print('exploratory results on understanding-related questions:')
understanding.results[p.adj < .05,]
fwrite(understanding.results,
       '../results/intermediate data/minimum wage (issue 2)/understanding_basecontrol_pretty.csv'
       )
885
+
886
+
887
+
888
#############################################################
## preregistered exploratory heterogeneous effect analysis ##
#############################################################

# outcome is mw_index_w2
# construct moderators by cutting demographics & pre-treatment vars at midpoint

d[,
  pol_interest_hi := as.numeric(
    pol_interest > median(pol_interest, na.rm = TRUE)
  )]
d[,
  age_hi := as.numeric(
    age > median(age, na.rm = TRUE)
  )]
d[,
  freq_youtube_hi := as.numeric(
    freq_youtube > median(freq_youtube, na.rm = TRUE)
  )]

moderator_variables <- c('pol_interest_hi',
                         'age_hi',
                         'male',
                         'freq_youtube_hi'
                         )
## added 4 jun 2024 at request of reviewer
moderator_variables_revision <- 'college'

## the four experimental subsets within which the 3/1-vs-2/2 (recsys.31)
## interaction with each moderator is estimated; order matters for the
## row order of interaction_results
heterogeneity.subsets <- list(
  attitude.anti = quote(attitude.anti == 1),
  attitude.pro = quote(attitude.pro == 1),
  attitude.neutral.seed.anti = quote(attitude.neutral == 1 & seed.anti == 1),
  attitude.neutral.seed.pro = quote(attitude.neutral == 1 & seed.pro == 1)
)

## for each moderator and each subset: regress the w2 policy index on
## recsys.31 x moderator with the w1 index as the only control (as in the
## primary analysis), and record the robust test of the interaction term.
## (refactored from four copy-pasted blocks; results are identical.)
interaction_results <- data.table()
for (moderator_variable in c(moderator_variables, moderator_variables_revision)){

  d[, moderator := get(moderator_variable)]

  for (subset.name in names(heterogeneity.subsets)){
    subset.mod <- lm(
      mw_index_w2 ~
        recsys.31 * moderator +
        mw_index_w1, # only control is pre-treatment outcome, as in primary analysis
      data = d[eval(heterogeneity.subsets[[subset.name]])]
    )
    subset.test <- coeftest(subset.mod, vcovHC(subset.mod))
    interaction_results <- rbind(
      interaction_results,
      data.table(subset = subset.name,
                 interaction = 'recsys.31:' %.% moderator_variable,
                 subset.test['recsys.31:moderator', , drop = FALSE]
                 ),
      fill = TRUE
    )
  }

}

# very little significant heterogeneity even before multiple testing correction
# out of 16 tests, 2 have p values of .043 and .032
print('heterogeneity results before multiple correction:')
interaction_results[`Pr(>|t|)` < .05,]
# none survives a BH correction
interaction_results[, p.adj := p.adjust(`Pr(>|t|)`, 'BH')]
print('heterogeneity p-values after multiple correction:')
interaction_results[, p.adj]

## added 4 jun 2024 at request of reviewers
## rename coeftest's default column names to short machine-readable ones
colnames(interaction_results) <- c(
  subset = 'subset',
  interaction = 'interaction',
  Estimate = 'est',
  `Std. Error` = 'se',
  `t value` = 't',
  `Pr(>|t|)` = 'p',
  p.adj = 'p.adj'
)[colnames(interaction_results)]
fwrite(interaction_results,
       '../results/intermediate data/minimum wage (issue 2)/heterogeneity_basecontrol.csv'
       )
1020
+
1021
+
1022
+
1023
###############################################
## added 30 sep 2024 at request of reviewers ##
###############################################

## what are minimum detectable effects, given multiple testing correction?

n_sims <- 1000
## full grid: 1000 seeds x candidate effect sizes from .01 to .05
params_sims <- expand.grid(seed = 19104 + 0:(n_sims - 1),
                           effect = seq(from = .01, to = .05, by = .001)
                           )

## step 1: identify largest p-value s.t. we would have rejected layer-1 null
## (that at least one treatment contrast has effect on policy index)
## to do this, we hold fixed p-values for all other layer-1 hypothesis families
layer1.pvals.mde <- layer1.pvals
layer1.pvals.mde['mwpolicy'] <- 0
## NOTE(review): the loop exits at the first value whose BH-adjusted p
## exceeds .05, so pval.cutoff is one .001 step above the last rejecting
## value; downstream comparisons use simes(...) <= pval.cutoff.
while (p.adjust(layer1.pvals.mde, 'BH')['mwpolicy'] <= .05){
  layer1.pvals.mde['mwpolicy'] <- layer1.pvals.mde['mwpolicy'] + .001
}
pval.cutoff <- layer1.pvals.mde['mwpolicy']
print('to achieve significance of policy attitude family at layer 1 (pooled test of any effect on policy index from any contrast) when correcting for multiple layer-1 hypothesis families, this is the minimum cutoff value after conducting simes correction of layer 2 pvals:')
pval.cutoff

## if layer-1 null was rejected for the policy outcome, then we would use this
## correction factor when interpreting layer-2 p-values (for specific contrasts)
## (the trailing TRUE counts the mwpolicy family itself as rejected)
layer1.nonnull.prop.if.gt.cutoff <- mean(c(
  p.adjust(layer1.pvals.mde, 'BH')[c('platform', 'media', 'affpol')] < .05,
  TRUE
))

## the sims below will only examine 3/1 vs 2/2 treatment contrasts, so we will
## hold fixed the layer-2 p-values that relate to seed contrasts
pvals.for.seed.contrasts.on.policyindex <- layer2.pvals$mwpolicy[
  c('attitude.neutral:seed.pro:recsys.31.vs.attitude.neutral:seed.anti:recsys.31',
    'attitude.neutral:seed.pro:recsys.22.vs.attitude.neutral:seed.anti:recsys.22'
    )
]
1060
+
1061
+
1062
+
1063
## step 2: prepare simulations based on real data ------------------------------

## for each experimental subset, fit the primary regression once on real data
## and keep (i) the design matrix, (ii) the residual sd, and (iii) fitted
## values with the treatment effect zeroed out — the sim baseline.

mod.attitude.anti <- lm(
  mw_index_w2 ~ recsys.31 + mw_index_w1,
  data = d[attitude.anti == 1]
)
X.attitude.anti <- model.matrix(mod.attitude.anti)
residual.sd.attitude.anti <- sd(resid(mod.attitude.anti))
## confirm that this recovers fitted values
## model.matrix(mod.attitude.anti) %*% coef(mod.attitude.anti)
## NOTE(review): exact == comparison of floats; presumably passes because both
## sides compute the identical product — confirm if assert ever fires.
assert_that(all(
  predict(mod.attitude.anti) ==
  X.attitude.anti %*% coef(mod.attitude.anti)
))
## we will create simulated outcomes, given hypothesized treatment effect
## == intercept + <-- part A
## real coef * real pretreatment attitude + <-- part A
## hypothesized treatment effect * real treatment status + <-- part B
## rnorm(mean = 0, sd = real residual outcome sd) <-- part C
## A: generate fitted values under hypothesized effect size
coef.attitude.anti.baseline <- coef(mod.attitude.anti)
coef.attitude.anti.baseline['recsys.31'] <- 0
Y.attitude.anti.baseline <-
  as.numeric(X.attitude.anti %*% coef.attitude.anti.baseline)
## B: will be added below with hypothesized effect * treatment
## C: will be drawn below with rnorm(mean=0, sd=residual_sd)
## (fixed: the B/C labels were swapped in the original comments)

## repeat above for respondents with pro attitude
mod.attitude.pro <- lm(
  mw_index_w2 ~ recsys.31 + mw_index_w1,
  data = d[attitude.pro == 1]
)
X.attitude.pro <- model.matrix(mod.attitude.pro)
residual.sd.attitude.pro <- sd(resid(mod.attitude.pro))
coef.attitude.pro.baseline <- coef(mod.attitude.pro)
coef.attitude.pro.baseline['recsys.31'] <- 0
Y.attitude.pro.baseline <-
  as.numeric(X.attitude.pro %*% coef.attitude.pro.baseline)

## repeat above for respondents with neutral attitude assigned to pro seed
mod.attitude.neutral.seed.pro <- lm(
  mw_index_w2 ~ recsys.31 + mw_index_w1,
  data = d[attitude.neutral == 1 & seed.pro == 1]
)
X.attitude.neutral.seed.pro <- model.matrix(mod.attitude.neutral.seed.pro)
residual.sd.attitude.neutral.seed.pro <- sd(resid(mod.attitude.neutral.seed.pro))
coef.attitude.neutral.seed.pro.baseline <- coef(mod.attitude.neutral.seed.pro)
coef.attitude.neutral.seed.pro.baseline['recsys.31'] <- 0
Y.attitude.neutral.seed.pro.baseline <-
  as.numeric(X.attitude.neutral.seed.pro %*% coef.attitude.neutral.seed.pro.baseline)

## repeat above for respondents with neutral attitude assigned to anti seed
mod.attitude.neutral.seed.anti <- lm(
  mw_index_w2 ~ recsys.31 + mw_index_w1,
  data = d[attitude.neutral == 1 & seed.anti == 1]
)
X.attitude.neutral.seed.anti <- model.matrix(mod.attitude.neutral.seed.anti)
residual.sd.attitude.neutral.seed.anti <- sd(resid(mod.attitude.neutral.seed.anti))
coef.attitude.neutral.seed.anti.baseline <- coef(mod.attitude.neutral.seed.anti)
coef.attitude.neutral.seed.anti.baseline['recsys.31'] <- 0
Y.attitude.neutral.seed.anti.baseline <-
  as.numeric(X.attitude.neutral.seed.anti %*% coef.attitude.neutral.seed.anti.baseline)
1125
+
1126
+
1127
+
1128
## step 3: conduct sims --------------------------------------------------------

## Run the full seed x effect-size simulation grid for one experimental
## subset (refactored from four copy-pasted foreach blocks; output rows are
## identical — the coefficient row label was never retained in the result).
##
## Y.baseline  : fitted values with the treatment effect zeroed out (part A)
## X           : design matrix from the real-data fit
## residual.sd : sd of real-data residuals, used to draw noise (part C)
##
## returns one row per (seed, effect): the robust coeftest row for the
## simulated recsys.31 coefficient (Estimate, Std. Error, t value, Pr(>|t|)).
## NOTE(review): relies on foreach exporting Y.baseline/X/residual.sd and the
## global params_sims to workers; works under the forked doParallel backend
## registered at the top of this script.
run.mde.sims <- function(Y.baseline, X, residual.sd){
  foreach(seed = params_sims$seed,
          effect = params_sims$effect,
          .combine = rbind
          ) %dopar%
    {
      set.seed(seed)
      ## simulated outcome = baseline (A) + effect * treatment (B) + noise (C)
      Y <-
        Y.baseline +
        effect * X[, 'recsys.31'] +
        rnorm(
          n = nrow(X),
          mean = 0,
          sd = residual.sd
        )
      mod <- lm(Y ~ 0 + X)
      smry <- coeftest(mod, vcovHC(mod))
      cbind(
        seed,
        effect,
        data.table(smry['Xrecsys.31', , drop = FALSE])
      )
    }
}

sims.attitude.anti <- run.mde.sims(Y.attitude.anti.baseline,
                                   X.attitude.anti,
                                   residual.sd.attitude.anti
                                   )

sims.attitude.pro <- run.mde.sims(Y.attitude.pro.baseline,
                                  X.attitude.pro,
                                  residual.sd.attitude.pro
                                  )

sims.attitude.neutral.seed.anti <- run.mde.sims(
  Y.attitude.neutral.seed.anti.baseline,
  X.attitude.neutral.seed.anti,
  residual.sd.attitude.neutral.seed.anti
)

sims.attitude.neutral.seed.pro <- run.mde.sims(
  Y.attitude.neutral.seed.pro.baseline,
  X.attitude.neutral.seed.pro,
  residual.sd.attitude.neutral.seed.pro
)
1221
+
1222
+
1223
+
1224
## step 4: analyze power results -----------------------------------------------

## without multiple-testing corrections:
## for each subset, the MDE is the smallest simulated effect size rejected
## at alpha = .05 in at least 80% of simulations

print('mde for respondents with anti attitude (conventional analysis w/o correction):')
sims.attitude.anti[,
                   .(p.reject = mean(`Pr(>|t|)` < .05)),
                   by = effect
                   ][p.reject >= .8, min(effect)]

print('mde for respondents with pro attitude (conventional analysis w/o correction):')
sims.attitude.pro[,
                  .(p.reject = mean(`Pr(>|t|)` < .05)),
                  by = effect
                  ][p.reject >= .8, min(effect)]

## fix: this label previously read 'assigned to pro seed' although it
## summarizes the anti-seed simulations
print('mde for respondents with neutral attitude assigned to anti seed (conventional analysis w/o correction):')
sims.attitude.neutral.seed.anti[,
                                .(p.reject = mean(`Pr(>|t|)` < .05)),
                                by = effect
                                ][p.reject >= .8, min(effect)]

## fix: this block previously printed no label at all (and its comment
## said 'anti seed'); it summarizes the pro-seed simulations
print('mde for respondents with neutral attitude assigned to pro seed (conventional analysis w/o correction):')
sims.attitude.neutral.seed.pro[,
                               .(p.reject = mean(`Pr(>|t|)` < .05)),
                               by = effect
                               ][p.reject >= .8, min(effect)]



## with multiple testing correction

sims <- rbind(
  sims.attitude.anti,
  sims.attitude.pro,
  sims.attitude.neutral.seed.anti,
  sims.attitude.neutral.seed.pro
)

## per (seed, effect): apply the full hierarchical correction procedure to
## the four simulated contrast p-values (plus the fixed real-data seed
## contrast p-values) and record the smallest fully corrected layer-2
## p-value among the 3/1-vs-2/2 contrasts (Inf if layer 1 is not rejected)
sims.layer1 <- sims[
  ,
  .(pval.pooled = ifelse(
      ## if these results would lead us to reject layer-1 pooled null of no effect
      ## on policy attitudes from any treatment contrast
      simes(c(
        `Pr(>|t|)`,
        pvals.for.seed.contrasts.on.policyindex
      )) <= pval.cutoff,
      ## disaggregate layer-2 results report with procedure from above
      ## (BH correction, then inflate by 1/prop of layer-1 sig results)
      ## then subset to only those p-values relating to 3/1 vs 2/2 contrast
      ## to see if any are <.05 after full correction procedure
      yes = min(
        p.adjust(c(`Pr(>|t|)`, pvals.for.seed.contrasts.on.policyindex),
                 'BH'
                 )[1:4] / layer1.nonnull.prop.if.gt.cutoff
      ),
      no = Inf
    )
  ),
  by = .(seed, effect)
]
print('with multiple testing correction:')
sims.layer1[, .(p.reject = mean(pval.pooled <= pval.cutoff)), by = effect]
print('mde:')
sims.layer1[,
            .(p.reject = mean(pval.pooled <= pval.cutoff)),
            by = effect
            ][p.reject >= .8, min(effect)]
1293
+
code/minimum wage (issue 2)/03b_analysis_multipletesting_yg.R ADDED
@@ -0,0 +1,1295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## banner so this script's output is identifiable in combined logs
cat(rep('=', 80),
    '\n\n',
    'OUTPUT FROM: minimum wage (issue 2)/03b_analysis_multipletesting_yg.R',
    '\n\n',
    sep = ''
    )

library(data.table)  # fread/fwrite and fast data frames
library(car)         # linearHypothesis
library(sandwich)    # vcovHC robust covariance estimators
library(lmtest)      # coeftest
library(ggplot2)
library(assertthat)
library(foreach)     # parallel simulation loops (%dopar%)
library(doParallel)
## leave one core free for the rest of the system
registerDoParallel(cores = detectCores() - 1)
17
+
18
+
19
+
20
+ ###############
21
+ ## functions ##
22
+ ###############
23
+
24
## infix string-concatenation shorthand: 'a' %.% 'b' == paste0('a', 'b')
`%.%` <- paste0
25
+
26
## Simes (1986) combination test: given a vector of per-hypothesis p-values,
## return the global p-value for the intersection null,
##   min_i { m * p_(i) / i },
## equivalently min(p.adjust(ps, 'BH')).
## Fix: the original used rank()'s default average ranks, which mishandles
## tied p-values (e.g. c(.02, .02) gave .0267 instead of the correct .02);
## ties.method = 'first' assigns distinct ordinal ranks, and the min over
## tied values then equals the correct sorted-rank statistic. The redundant
## sort() inside min() is also dropped (min is order-invariant).
simes <- function(ps){
  min(length(ps) * ps / rank(ps, ties.method = 'first'))
}
29
+
30
+ ### functions to handle inconsistent interaction ordering of mlm() ###
31
+
32
## convert interaction terms of form 'b#:a#' to 'a#:b#'
## (canonicalizes each term so differently-ordered interaction names compare
## equal; optionally strips a leading prefix first)
reorder.interaction.names <- function(x, prefix = ''){
  stripped <- gsub(paste0('^', prefix), '', x)
  components <- strsplit(stripped, ':')
  sapply(components,
         function(term) paste(sort(term), collapse = ':')
         )
}
40
+
41
## take term of form 'a1:b1', look up in vector of form 'b#:a#', return 'b1:a1'
## (i.e. map each requested interaction term to the spelling actually used
## in y, matching on the canonical component ordering)
convert.interaction.names <- function(x, y, prefix.y = ''){
  canonical.x <- reorder.interaction.names(x)
  canonical.y <- reorder.interaction.names(y, prefix = prefix.y)
  y[match(canonical.x, canonical.y)]
}
48
+
49
## modified from print.linearHypothesis.mlm to use alternate df & return pvals
## (print method is responsible for doing the actual computation of pvals)
##
## x           : a linearHypothesis.mlm result object
## SSP, SSPE   : whether to print the hypothesis / error SSP matrices
## df.residual : residual df used for the approximate F statistics; exposed
##               as an argument so callers can substitute alternate df
## returns an anova-classed data.frame with one row per requested
## multivariate test (Df, test stat, approx F, num/den Df, Pr(>F))
## NOTE(review): relies on unexported car::: internals (Pillai/Wilks/HL/Roy),
## so it is tied to the installed car version; also prints as a side effect.
extract.lht <- function(x,
                        SSP = TRUE,
                        SSPE = SSP,
                        digits = getOption('digits'),
                        df.residual = x$df.residual
                        ){
  test <- x$test
  if (!is.null(x$P) && SSP) {
    P <- x$P
    cat("\n Response transformation matrix:\n")
    attr(P, "assign") <- NULL
    attr(P, "contrasts") <- NULL
    print(P, digits = digits)
  }
  if (SSP) {
    cat("\nSum of squares and products for the hypothesis:\n")
    print(x$SSPH, digits = digits)
  }
  if (SSPE) {
    cat("\nSum of squares and products for error:\n")
    print(x$SSPE, digits = digits)
  }
  ## singular error SSP: no tests computable; return the input invisibly
  if ((!is.null(x$singular)) && x$singular) {
    warning("the error SSP matrix is singular; multivariate tests are unavailable")
    return(invisible(x))
  }
  SSPE.qr <- qr(x$SSPE)
  ## eigenvalues of SSPE^-1 %*% SSPH drive all four multivariate statistics
  eigs <- Re(eigen(qr.coef(SSPE.qr, x$SSPH), symmetric = FALSE)$values)
  tests <- matrix(NA, 4, 4)
  rownames(tests) <- c("Pillai", "Wilks", "Hotelling-Lawley",
                       "Roy")
  if ("Pillai" %in% test)
    tests[1, 1:4] <- car:::Pillai(eigs, x$df, df.residual)
  if ("Wilks" %in% test)
    tests[2, 1:4] <- car:::Wilks(eigs, x$df, df.residual)
  if ("Hotelling-Lawley" %in% test)
    tests[3, 1:4] <- car:::HL(eigs, x$df, df.residual)
  if ("Roy" %in% test)
    tests[4, 1:4] <- car:::Roy(eigs, x$df, df.residual)
  tests <- na.omit(tests)
  ## keep only rows whose F approximation is well-defined
  ok <- tests[, 2] >= 0 & tests[, 3] > 0 & tests[, 4] > 0
  ok <- !is.na(ok) & ok
  tests <- cbind(x$df, tests, pf(tests[ok, 2], tests[ok, 3],
                                 tests[ok, 4], lower.tail = FALSE))
  colnames(tests) <- c("Df", "test stat", "approx F", "num Df",
                       "den Df", "Pr(>F)")
  tests <- structure(as.data.frame(tests),
                     heading = paste("\nMultivariate Test",
                                     if (nrow(tests) > 1)
                                       "s", ": ", x$title, sep = ""),
                     class = c("anova",
                               "data.frame"
                               )
                     )
  return(tests)
}
107
+
108
+
109
+
110
###############
## load data ##
###############

## cleaned wave-1/wave-2 survey data for the minimum wage (issue 2) study
d <- fread('../results/intermediate data/minimum wage (issue 2)/yg_w12_clean.csv')

## drop pure control
d <- d[treatment_arm != 'control',]

## drop NA video counts
d <- d[!is.na(pro) & !is.na(anti),]
121
+
122
+
123
+
124
##############
## controls ##
##############

## pre-treatment covariate sets, one per hypothesis family

platform.controls <- c('age_cat',
                       'male',
                       'pol_interest',
                       'freq_youtube'
                       )

mwpolicy.controls <- 'mw_index_w1'

media.controls <- c('trust_majornews_w1',
                    'trust_youtube_w1',
                    'fabricate_majornews_w1',
                    'fabricate_youtube_w1'
                    )

affpol.controls <- c('affpol_ft',
                     'affpol_smart',
                     'affpol_comfort'
                     )

controls.raw <- unique(c(platform.controls,
                         mwpolicy.controls,
                         media.controls,
                         affpol.controls
                         )
                       )

## transform control variables by creating dummies and demeaning
controls.trans <- list()
for (j in controls.raw){
  ## convert to dummies if needed (model.matrix expands factor/character
  ## controls; na.action = 'na.pass' keeps rows with missing values)
  controls.j <- model.matrix(as.formula('~ 0 + ' %.% j),
                             model.frame(as.formula('~ 0 + ' %.% j),
                                         data = d,
                                         na.action = 'na.pass'
                                         )
                             )
  ## demean by column
  ## NOTE(review): the stray trailing comma after FUN matches sweep()'s
  ## check.margin argument as missing (default used); harmless but odd.
  controls.j <- sweep(controls.j,
                      MARGIN = 2,
                      STATS = colMeans(controls.j, na.rm = TRUE),
                      FUN = `-`,
                      )
  colnames(controls.j) <- make.names(colnames(controls.j))
  ## remove control from original data
  d[[j]] <- NULL
  ## reinsert transformed control
  d <- cbind(d, controls.j)
  ## keep track of which original controls map to which transformed controls
  controls.trans[[j]] <- colnames(controls.j)
}

## map original control variables to transformed versions
platform.controls <- unlist(controls.trans[platform.controls])
mwpolicy.controls <- unlist(controls.trans[mwpolicy.controls])
media.controls <- unlist(controls.trans[media.controls])
affpol.controls <- unlist(controls.trans[affpol.controls])
184
+
185
+
186
+
187
##############
## outcomes ##
##############

### hypothesis family 1: platform interactions ###

## platform interaction time: compute winsorized usage time
## (duration clipped at the 2.5th/97.5th percentiles)
warning('diverges from pap, 95% windsorized due to extreme outliers')
d[, platform_duration := duration]
d[platform_duration <= quantile(d$duration, .025),
  platform_duration := quantile(d$duration, .025)
  ]
d[platform_duration >= quantile(d$duration, .975),
  platform_duration := quantile(d$duration, .975)
  ]
## all platform interaction outcomes
platform.outcomes <- c('pro_fraction_chosen',
                       'positive_interactions', # positive - negative (dislike)
                       'platform_duration'
                       )



### hypothesis family 2: MW policy attitudes ###

## only one preregistered outcome in this family
mwpolicy.outcomes <- 'mw_index_w2'
## added 4 jun 2024 at request of reviewers
mwpolicy.outcomes.understanding <- c('mw_restrict_w2',
                                     'mw_help_w2'
                                     )



### hypothesis family 3: media trust ###
media.outcomes <- c('trust_majornews_w2',
                    'trust_youtube_w2',
                    'fabricate_majornews_w2',
                    'fabricate_youtube_w2'
                    )



### hypothesis family 4: affective polarization ###
affpol.outcomes <- c('affpol_ft_w2',
                     'affpol_smart_w2',
                     'affpol_comfort_w2'
                     )

## combined list of all wave-2 outcomes across families
outcomes <- unique(c(
  platform.outcomes,
  mwpolicy.outcomes,
  media.outcomes,
  affpol.outcomes
)
)
243
+
244
+
245
+
246
################
## treatments ##
################

## create attitude dummies from the pre-treatment attitude terciles in `thirds`
## (pro/anti stance on issue has opposite lib/con meaning from study 1)
d[, attitude := c('pro', 'neutral', 'anti')[thirds]]
d[, attitude.anti := as.numeric(attitude == 'anti')]
d[, attitude.neutral := as.numeric(attitude == 'neutral')]
d[, attitude.pro := as.numeric(attitude == 'pro')]

## create seed dummies (which seed video the respondent was assigned)
d[, seed.anti := as.numeric(treatment_seed == 'anti')]
d[, seed.pro := as.numeric(treatment_seed == 'pro')]

## create recsys dummies (recommender arm, detected by substring of arm label)
d[, recsys.22 := as.numeric(treatment_arm %like% '22')]
d[, recsys.31 := as.numeric(treatment_arm %like% '31')]

## manually define coefficients to estimate: one cell per
## attitude x (seed, for neutrals only) x recsys combination.
## these strings are used both to build formulas and to look up coefficients.
treatments <- c('attitude.anti:recsys.22',
                'attitude.anti:recsys.31',
                'attitude.neutral:seed.anti:recsys.22',
                'attitude.neutral:seed.pro:recsys.22',
                'attitude.neutral:seed.anti:recsys.31',
                'attitude.neutral:seed.pro:recsys.31',
                'attitude.pro:recsys.22',
                'attitude.pro:recsys.31'
                )
276
## preregistered treatment-control contrasts: each row names the two cell
## coefficients (from `treatments`) whose difference is estimated/tested.
## note the resulting layer-2 p-value names are paste(treat, ctrl, sep='.vs.').
contrasts <- rbind(
    i = c(treat = 'attitude.pro:recsys.31',
          ctrl = 'attitude.pro:recsys.22'
          ),
    ii = c(treat = 'attitude.anti:recsys.31',
           ctrl = 'attitude.anti:recsys.22'
           ),
    iii = c(treat = 'attitude.neutral:seed.pro:recsys.31',
            ctrl = 'attitude.neutral:seed.pro:recsys.22'
            ),
    iv = c(treat = 'attitude.neutral:seed.anti:recsys.31',
           ctrl = 'attitude.neutral:seed.anti:recsys.22'
           ),
    # in (v-vi), pro/anti order is reversed from study 1 to ensure that
    # - 1st condition (treatment) is always the conservative video
    # - 2nd condition (control) is always the liberal video
    v = c(treat = 'attitude.neutral:seed.anti:recsys.31',
          ctrl = 'attitude.neutral:seed.pro:recsys.31'
          ),
    vi = c(treat = 'attitude.neutral:seed.anti:recsys.22',
           ctrl = 'attitude.neutral:seed.pro:recsys.22'
           )
)

## check that contrasts are valid (every referenced cell is a known treatment)
assert_that(all(unlist(contrasts) %in% treatments))
303
## check that specifications are equivalent: the saturated factor
## specification (attitude x arm) and the manual dummy-interaction
## specification must yield the same set of cell means
coefs.v1 <- coef(lm(mw_index_w2 ~ 0 + attitude:treatment_arm, d))
coefs.v2 <- coef(
    lm(mw_index_w2 ~
           0 +
           attitude.anti:recsys.22 +
           attitude.anti:recsys.31 +
           attitude.neutral:seed.anti:recsys.22 +
           attitude.neutral:seed.pro:recsys.22 +
           attitude.neutral:seed.anti:recsys.31 +
           attitude.neutral:seed.pro:recsys.31 +
           attitude.pro:recsys.22 +
           attitude.pro:recsys.31,
       d
       )
)
## compare sorted, unnamed coefficient values (names differ by construction)
assert_that(all.equal(unname(sort(coefs.v1)), unname(sort(coefs.v2))))
321
+
322
+
323
##########################
## hierarchical testing ##
##########################

## initialize top layer p-values:
## does treatment have any effect on any outcome in family
families <- c(
    'platform',
    'mwpolicy',
    'media',
    'affpol'
)
layer1.pvals <- rep(NA_real_, length(families))
layer1.notes <- rep('', length(families))
names(layer1.pvals) <- families

## initialize 2nd layer p-values:
## which treatment has detectable effect?
## names are '<treat>.vs.<ctrl>', matching the rows of `contrasts`
contrast.pvals <- rep(NA_real_, nrow(contrasts))
names(contrast.pvals) <- paste(contrasts[, 'treat'],
                               contrasts[, 'ctrl'],
                               sep = '.vs.'
                               )
layer2.pvals <- list(
    platform = contrast.pvals,
    mwpolicy = contrast.pvals,
    media = contrast.pvals,
    affpol = contrast.pvals
)
rm(contrast.pvals)

## initialize 3rd layer p-values:
## on which specific outcome in family?
layer3.pvals <- list()
layer3.ests <- list()
layer3.ses <- list()
layer3.notes <- list()
for (i in seq_along(families)){
    family <- families[i]
    layer3.pvals[[family]] <- list()
    layer3.ests[[family]] <- list()
    layer3.ses[[family]] <- list()
    layer3.notes[[family]] <- list()
    ## fix: use a family-local name here instead of `outcomes`, which
    ## previously clobbered the global union of all outcomes defined above
    family.outcomes.init <- get(family %.% '.outcomes')
    for (j in 1:nrow(contrasts)){
        contrast <- paste(contrasts[j, 'treat'],
                          contrasts[j, 'ctrl'],
                          sep = '.vs.'
                          )
        layer3.pvals[[family]][[contrast]] <- numeric(0)
        layer3.ests[[family]][[contrast]] <- numeric(0)
        layer3.ses[[family]][[contrast]] <- numeric(0)
        for (k in seq_along(family.outcomes.init)){
            outcome <- family.outcomes.init[k]
            layer3.pvals[[family]][[contrast]][outcome] <- NA_real_
            layer3.ests[[family]][[contrast]][outcome] <- NA_real_
            layer3.ses[[family]][[contrast]][outcome] <- NA_real_
            ## notes are kept per outcome (not per contrast)
            layer3.notes[[family]][outcome] <- ''
        }
    }
}
385
+
386
+
387
### begin nested analyses ###
## for each hypothesis family:
##   layer 1 — joint test that any contrast affects any outcome in the family
##   layer 2 — per-contrast joint test across the family's outcomes
##   layer 3 — per-contrast, per-outcome test with estimate and robust SE

for (i in seq_along(families)){

    family <- families[i]
    family.outcomes <- get(family %.% '.outcomes')
    family.controls <- get(family %.% '.controls')
    ## all treatment-by-control interaction term names, used only as
    ## candidates when mapping dropped (NA) coefficients back to terms
    family.controls.interactions <- as.character(
        outer(treatments,
              family.controls,
              FUN = function(x, y) x %.% ':' %.% y
              )
    )

    ## multivariate (mlm) formula: all family outcomes ~ cells + controls,
    ## with no intercept so each treatment cell gets its own mean
    family.formula <-
        'cbind(' %.% # outcomes
        paste(family.outcomes,
              collapse = ', '
              ) %.% ') ~\n0 +\n' %.%
        paste(treatments, # treatments (base terms)
              collapse = ' +\n'
              ) %.% ' +\n' %.%
        paste(family.controls, # controls (base terms)
              collapse = ' +\n'
              )

    cat(rep('=', 80),
        '\n\nHYPOTHESIS FAMILY: ',
        family,
        '\n\nrunning mlm:\n\n',
        family.formula,
        '\n\n',
        sep = ''
        )

    ## run model
    family.mod <- lm(family.formula, d)

    ## hack to eliminate NA coefs: drop collinear terms from the formula
    ## and refit (linearHypothesis cannot handle NA coefficients)
    if (any(is.na(coef(family.mod)))){
        if ('mlm' %in% class(family.mod)){
            drop <- rownames(coef(family.mod))[is.na(coef(family.mod))[, 1]]
        } else {
            drop <- names(coef(family.mod))[is.na(coef(family.mod))]
        }
        drop <- convert.interaction.names(drop,
                                          c(family.controls,
                                            family.controls.interactions
                                            )
                                          )
        ## fix: `collapse` (not `sep`) joins a vector into one string;
        ## with `sep` the note became a character vector, not a scalar
        layer1.notes[[i]] <-
            layer1.notes[[i]] %.%
            'dropped the following coefs: ' %.%
            paste(drop, collapse = ', ') %.%
            '\n\n'
        family.formula <- gsub(
            '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
            '',
            family.formula
        )
        family.mod <- lm(family.formula, d)
    }

    ## heteroskedasticity-robust vcov for all tests in this family
    family.vcov <- vcovHC(family.mod)
    if (is.null(dim(coef(family.mod)))){
        coef.names <- names(coef(family.mod))
    } else {
        coef.names <- rownames(coef(family.mod))
    }

    ### top layer: test overall significance of all contrasts on all outcomes ###
    ## convert interaction terms to whatever mlm() named it
    treats <- convert.interaction.names(contrasts[, 'treat'], coef.names)
    ctrls <- convert.interaction.names(contrasts[, 'ctrl'], coef.names)
    ## test jointly; a singular hypothesis matrix raises an error, which is
    ## caught and recorded so layer 1 can later fall back to simes()
    lht.attempt <- tryCatch({
        if ('mlm' %in% class(family.mod)){
            contrast.lht <- linearHypothesis(
                family.mod,
                vcov. = family.vcov,
                hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
                rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
                test = 'Pillai'
            )
            layer1.pvals[[i]] <- extract.lht(contrast.lht)[, 'Pr(>F)']
        } else {
            contrast.lht <- linearHypothesis(
                family.mod,
                vcov. = family.vcov,
                hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
                rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
                test = 'F'
            )
            layer1.pvals[[i]] <- contrast.lht[['Pr(>F)']][2]
        }
    },
    error = function(e){
        ## fix: use conditionMessage(e) — concatenating the condition object
        ## itself produced a length-2 character vector (message + call),
        ## which broke the scalar `if` test below
        warning(sprintf('caught error in %s family: %s',
                        family,
                        conditionMessage(e)
                        ))
        ## return error as string for inclusion in notes
        'caught error: ' %.%
            conditionMessage(e) %.%
            '\n\n'
    })
    if (lht.attempt %like% 'caught error'){
        layer1.notes[[i]] <-
            layer1.notes[[i]] %.% lht.attempt
    }



    ### layer 2: test each contrast individually on all outcomes ###

    for (j in 1:nrow(contrasts)){
        ## test group equality on all outcomes
        if ('mlm' %in% class(family.mod)){
            contrast.lht <-
                linearHypothesis(
                    family.mod,
                    vcov. = family.vcov,
                    hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
                    rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
                    test = 'Pillai'
                )
            layer2.pvals[[i]][j] <- extract.lht(contrast.lht)[, 'Pr(>F)']
        } else {
            contrast.lht <- linearHypothesis(
                family.mod,
                vcov. = family.vcov,
                hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
                rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
                test = 'F'
            )
            layer2.pvals[[i]][j] <- contrast.lht[['Pr(>F)']][2]
        }
    }

    ### layer 3: test each contrast on each outcome individually ###

    for (k in seq_along(family.outcomes)){

        outcome <- family.outcomes[k]

        ## univariate formula for this outcome, same RHS as the mlm
        outcome.formula <-
            outcome %.% ' ~\n0 +\n' %.%
            paste(treatments, # treatments (base terms)
                  collapse = ' +\n'
                  ) %.% ' +\n' %.%
            paste(family.controls, # controls (base terms)
                  collapse = ' +\n'
                  )

        cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')

        outcome.mod <- lm(outcome.formula, d)
        ## hack to eliminate NA coefs (same as family-level refit above)
        if (any(is.na(coef(outcome.mod)))){
            drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
            drop <- convert.interaction.names(drop,
                                              c(family.controls,
                                                family.controls.interactions
                                                )
                                              )
            ## fix: `collapse` (not `sep`) joins the dropped names
            layer3.notes[[i]][k] <-
                layer3.notes[[i]][k] %.%
                'dropped the following coefs: ' %.%
                paste(drop, collapse = ', ') %.%
                '\n\n'
            outcome.formula <- gsub(
                '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
                '',
                outcome.formula
            )
            outcome.mod <- lm(outcome.formula, d)
        }

        outcome.vcov <- vcovHC(outcome.mod)
        ## robust vcov can be non-finite in degenerate cells; fall back
        if (any(!is.finite(outcome.vcov))){
            outcome.vcov <- vcov(outcome.mod)
            layer3.notes[[i]][k] <-
                layer3.notes[[i]][k] %.%
                'falling back to non-robust vcov\n\n'
        }
        coef.names <- names(coef(outcome.mod))

        for (j in 1:nrow(contrasts)){

            ## convert this interaction term to whatever lm() named it
            treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
            ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
            ## test group equality on this outcome
            contrast.lht <- linearHypothesis(
                outcome.mod,
                vcov. = outcome.vcov,
                hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
                test = 'F'
            )
            layer3.pvals[[i]][[j]][k] <- contrast.lht[['Pr(>F)']][2]
            layer3.ests[[i]][[j]][k] <- (
                coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
            ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
            ## SE of the difference via the delta rule on the vcov
            layer3.ses[[i]][[j]][k] <- sqrt(
                outcome.vcov[treat, treat] +
                outcome.vcov[ctrl, ctrl] -
                2 * outcome.vcov[treat, ctrl]
            )

            ## ## confirm (manual cross-checks of the F and Chisq tests)
            ## linearHypothesis(
            ##     outcome.mod,
            ##     vcov. = outcome.vcov,
            ##     hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
            ##     test = 'F'
            ## )
            ## (coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl])^2 /
            ##     (
            ##         outcome.vcov[treat, treat] +
            ##         outcome.vcov[ctrl, ctrl] -
            ##         2 * outcome.vcov[treat, ctrl]
            ##     )
            ## linearHypothesis(
            ##     outcome.mod,
            ##     vcov. = outcome.vcov,
            ##     hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
            ##     test = 'Chisq'
            ## )
            ## 2 - 2 * pnorm(abs(
            ##     (coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]) /
            ##     sqrt(
            ##         outcome.vcov[treat, treat] +
            ##         outcome.vcov[ctrl, ctrl] -
            ##         2 * outcome.vcov[treat, ctrl]
            ##     )
            ## ))

        }

    }

}
626
+
627
+
628
+
629
#################################
## multiple testing correction ##
#################################
## hierarchical (gatekeeping) procedure: a layer's hypotheses are only
## tested if the layer above rejected, and adjusted p-values are inflated
## by the estimated proportion of non-null results selected above.

thresh <- .05

## if layer-1 f-test is infeasible for a family due to collinearity,
## obtain layer-1 p-values for that family by simes
for (i in which(is.na(layer1.pvals))){
    layer1.pvals[i] <- simes(layer2.pvals[[i]])
}

## multiple testing adjustment for layer 1 (BH across the 4 families)
layer1.pvals.adj <- p.adjust(layer1.pvals, 'BH')
layer1.nonnull.prop <- mean(layer1.pvals.adj < thresh)

## test layer-2 hypotheses only if layer 1 passes
layer2.pvals.adj <- layer2.pvals # start by copying unadjusted layer-2 p-values
layer2.nonnull.prop <- rep(NA, length(layer1.pvals.adj))
names(layer2.nonnull.prop) <- names(layer1.pvals.adj)
for (i in 1:length(layer1.pvals)){
    if (layer1.pvals.adj[i] < thresh){ # if layer 1 passes
        ## adjust for multiplicity within layer 2...
        layer2.pvals.adj[[i]] <- p.adjust(layer2.pvals[[i]], 'BH')
        ## ... and inflate to account for selection at layer 1
        layer2.pvals.adj[[i]] <-
            pmin(layer2.pvals.adj[[i]] / layer1.nonnull.prop, 1)
        ## keep track of selection at layer 2 for use in layer 3
        layer2.nonnull.prop[i] <- mean(layer2.pvals.adj[[i]] < thresh)
    } else { # if layer 1 fails
        ## not tested: fill with NAs (preserving names)
        layer2.pvals.adj[[i]] <- rep(NA_real_, length(layer2.pvals[[i]]))
        names(layer2.pvals.adj[[i]]) <- names(layer2.pvals[[i]])
    }
}

## test layer-3 hypotheses only if layers 1 & 2 pass
layer3.pvals.adj <- layer3.pvals # start by copying unadjusted layer-3 p-values
for (i in 1:length(layer1.pvals.adj)){
    for (j in 1:length(layer2.pvals.adj[[i]])){
        ## gate on both layers above
        if (layer1.pvals.adj[i] < thresh && # if layer 1 passes...
            layer2.pvals.adj[[i]][j] < thresh # ... and if layer 2 passes
            ){
            ## adjust for multiplicity within layer 3...
            layer3.pvals.adj[[i]][[j]] <- p.adjust(layer3.pvals[[i]][[j]], 'BH')
            ## ... and inflate to account for selection at layers 1 and 2
            layer3.pvals.adj[[i]][[j]] <- pmin(
                layer3.pvals.adj[[i]][[j]] / layer1.nonnull.prop / layer2.nonnull.prop[i],
                1
            )
        } else {
            ## not tested: fill with NAs (preserving names)
            layer3.pvals.adj[[i]][[j]] <- rep(NA_real_, length(layer3.pvals[[i]][[j]]))
            names(layer3.pvals.adj[[i]][[j]]) <- names(layer3.pvals[[i]][[j]])
        }
    }
}

## flatten the three layers into one long table; 'overall' marks rows that
## summarize a whole family (layer 1) or a whole contrast (layer 2)
pvals.adj <- data.table(layer1 = character(0),
                        layer2 = character(0),
                        layer3 = character(0),
                        p.adj = numeric(0),
                        est = numeric(0),
                        se = numeric(0)
                        )
for (i in 1:length(layer1.pvals.adj)){
    pvals.adj <- rbind(pvals.adj,
                       data.table(layer1 = names(layer1.pvals.adj)[i],
                                  layer2 = 'overall',
                                  layer3 = 'overall',
                                  p.adj = layer1.pvals.adj[i],
                                  est = NA_real_,
                                  se = NA_real_
                                  )
                       )
    for (j in 1:length(layer2.pvals.adj[[i]])){
        pvals.adj <- rbind(pvals.adj,
                           data.table(layer1 = names(layer1.pvals.adj)[i],
                                      layer2 = names(layer2.pvals.adj[[i]])[j],
                                      layer3 = 'overall',
                                      p.adj = layer2.pvals.adj[[i]][j],
                                      est = NA_real_,
                                      se = NA_real_
                                      )
                           )
        for (k in 1:length(layer3.pvals.adj[[i]][[j]])){
            pvals.adj <- rbind(pvals.adj,
                               data.table(layer1 = names(layer1.pvals.adj)[i],
                                          layer2 = names(layer2.pvals.adj[[i]])[j],
                                          layer3 = names(layer3.pvals.adj[[i]][[j]])[k],
                                          p.adj = layer3.pvals.adj[[i]][[j]][k],
                                          est = layer3.ests[[i]][[j]][k],
                                          se = layer3.ses[[i]][[j]][k]
                                          )
                               )
        }
    }
}
726
+
727
## write out
## fix: showWarnings = FALSE so reruns do not warn when the directory
## already exists; recursive = TRUE so missing parent dirs are created
dir.create('../results/intermediate data/minimum wage (issue 2)',
           showWarnings = FALSE,
           recursive = TRUE
           )
fwrite(pvals.adj, '../results/intermediate data/minimum wage (issue 2)/padj_basecontrol_yg.csv')

## prettify for reading: descriptive column names and human-readable contrasts
pvals.adj.pretty <- pvals.adj
colnames(pvals.adj.pretty) <- gsub('layer1',
                                   'layer1_hypothesisfamily',
                                   colnames(pvals.adj.pretty)
                                   )
colnames(pvals.adj.pretty) <- gsub('layer2',
                                   'layer2_treatmentcontrast',
                                   colnames(pvals.adj.pretty)
                                   )
colnames(pvals.adj.pretty) <- gsub('layer3',
                                   'layer3_specificoutcome',
                                   colnames(pvals.adj.pretty)
                                   )
## 'attitude.pro:seed.anti:recsys.31' -> 'pro anti 31' (seed part optional)
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
                       'attitude\\.(pro|anti|neutral)(:seed\\.(pro|anti))?:recsys.(31|22)',
                       '\\1 \\3 \\4',
                       layer2_treatmentcontrast
                   )]
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
                       '.vs.',
                       ' - ',
                       layer2_treatmentcontrast,
                       fixed = TRUE
                   )]
## collapse the double spaces left by the optional seed group
pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
                       ' +',
                       ' ',
                       layer2_treatmentcontrast
                   )]
fwrite(pvals.adj.pretty,
       '../results/intermediate data/minimum wage (issue 2)/padj_basecontrol_pretty_yg.csv'
       )

print('preregistered results:')
## explicit print() so the subset is shown even when this file is source()d
print(pvals.adj.pretty[p.adj < .05 & layer3_specificoutcome != 'overall',])
767
+
768
+
769
+
770
##############################################
## added 4 jun 2024 at request of reviewers ##
##############################################

## analyze components of main policy outcome index that relate to
## post-experiment w2 "understanding" of an issue, using w1 version
## of that same outcome as the only control (analogous to outcome index
## regression, which uses w2 index as outcome and w1 index as control)

## initialize results table
understanding.results <- data.table(layer2_treatmentcontrast = character(0),
                                    layer3_specificoutcome = character(0),
                                    est = numeric(0),
                                    se = numeric(0),
                                    p = numeric(0)
                                    )
## fix: keep this section's model notes locally; the previous version wrote
## into layer3.notes[[i]][k] using loop indices left over from the primary
## analysis loop, which corrupted the last family's bookkeeping
understanding.notes <- rep('', length(mwpolicy.outcomes.understanding))

## loop over outcomes
for (k in seq_along(mwpolicy.outcomes.understanding)){

    outcome <- mwpolicy.outcomes.understanding[k]

    ## cells + the w1 version of this outcome as the sole control
    outcome.formula <-
        outcome %.% ' ~\n0 +\n' %.%
        paste(treatments, # treatments (base terms)
              collapse = ' +\n'
              ) %.% ' +\n' %.%
        paste(gsub('w2', 'w1', outcome), # controls (w1 outcome)
              collapse = ' +\n'
              )

    cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')

    outcome.mod <- lm(outcome.formula, d)
    ## hack to eliminate NA coefs
    if (any(is.na(coef(outcome.mod)))){
        drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
        ## NOTE(review): the candidate list reuses family.controls[.interactions]
        ## from the LAST iteration of the primary loop, but the only control in
        ## this formula is the w1 outcome — confirm this mapping is intended
        drop <- convert.interaction.names(drop,
                                          c(family.controls,
                                            family.controls.interactions
                                            )
                                          )
        ## fix: `collapse` (not `sep`) joins the dropped names into one string
        understanding.notes[k] <-
            understanding.notes[k] %.%
            'dropped the following coefs: ' %.%
            paste(drop, collapse = ', ') %.%
            '\n\n'
        outcome.formula <- gsub(
            '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
            '',
            outcome.formula
        )
        outcome.mod <- lm(outcome.formula, d)
    }

    outcome.vcov <- vcovHC(outcome.mod)
    ## robust vcov can be non-finite in degenerate cells; fall back
    if (any(!is.finite(outcome.vcov))){
        outcome.vcov <- vcov(outcome.mod)
        understanding.notes[k] <-
            understanding.notes[k] %.%
            'falling back to non-robust vcov\n\n'
    }
    coef.names <- names(coef(outcome.mod))

    ## loop over treatment contrasts
    for (j in 1:nrow(contrasts)){

        ## convert this interaction term to whatever lm() named it
        treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
        ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
        ## test group equality on this outcome
        contrast.lht <- linearHypothesis(
            outcome.mod,
            vcov. = outcome.vcov,
            hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
            test = 'F'
        )

        ## prettify name of contrast for readability

        contrast <- treat %.% ' - ' %.% ctrl
        contrast <- gsub('attitude\\.(pro|anti|neutral)', '\\1', contrast)
        contrast <- gsub('seed\\.(pro|anti)', '\\1', contrast)
        contrast <- gsub('recsys.(31|22)', '\\1', contrast)
        contrast <- gsub(':', ' ', contrast)
        contrast <- gsub(' +', ' ', contrast)

        p <- contrast.lht[['Pr(>F)']][2]
        est <- (
            coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
        ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
        ## SE of the difference via the delta rule on the vcov
        se <- sqrt(
            outcome.vcov[treat, treat] +
            outcome.vcov[ctrl, ctrl] -
            2 * outcome.vcov[treat, ctrl]
        )

        understanding.results <- rbind(
            understanding.results,
            data.table(
                layer2_treatmentcontrast = contrast,
                layer3_specificoutcome = outcome,
                p,
                est,
                se
            )
        )

    }

}

## conduct multiple testing adjustment within newly exploratory results
understanding.results[, p.adj := p.adjust(p, 'BH')]
print('exploratory results on understanding-related questions:')
print(understanding.results[p.adj < .05,])
fwrite(understanding.results,
       '../results/intermediate data/minimum wage (issue 2)/understanding_basecontrol_pretty_yg.csv'
       )
889
+
890
+
891
+
892
#############################################################
## preregistered exploratory heterogeneous effect analysis ##
#############################################################

# outcome is mw_index_w2
# construct moderators by cutting demographics & pre-treatment vars at midpoint
# (pol_interest uses >= so the median category counts as "high")

d[,
  pol_interest_hi := as.numeric(
      pol_interest >= median(pol_interest, na.rm = TRUE)
  )]
d[,
  age_hi := as.numeric(
      age > median(age, na.rm = TRUE)
  )]
d[,
  freq_youtube_hi := as.numeric(
      freq_youtube > median(freq_youtube, na.rm = TRUE)
  )]

moderator_variables <- c('pol_interest_hi',
                         'age_hi',
                         'male',
                         'freq_youtube_hi'
                         )
## added 4 jun 2024 at request of reviewer
moderator_variables_revision <- 'college'

## helper: within one experimental subset, fit
##   mw_index_w2 ~ recsys.31 * moderator + mw_index_w1
## (only control is pre-treatment outcome, as in primary analysis) and
## return a one-row table with the robust test of the interaction term
het.interaction.row <- function(subset.label, dsub, moderator_variable){
    mod <- lm(
        mw_index_w2 ~
            recsys.31 * moderator +
            mw_index_w1,
        data = dsub
    )
    tst <- coeftest(mod, vcovHC(mod))
    data.table(subset = subset.label,
               interaction = 'recsys.31:' %.% moderator_variable,
               tst['recsys.31:moderator', , drop = FALSE]
               )
}

interaction_results <- data.table()
for (moderator_variable in c(moderator_variables, moderator_variables_revision)){

    d[, moderator := get(moderator_variable)]

    ## same four subsets, in the same row order, as the original
    ## copy-pasted per-subset blocks
    interaction_results <- rbind(
        interaction_results,
        het.interaction.row('attitude.anti',
                            d[attitude.anti == 1],
                            moderator_variable
                            ),
        het.interaction.row('attitude.pro',
                            d[attitude.pro == 1],
                            moderator_variable
                            ),
        het.interaction.row('attitude.neutral.seed.anti',
                            d[attitude.neutral == 1 & seed.anti == 1],
                            moderator_variable
                            ),
        het.interaction.row('attitude.neutral.seed.pro',
                            d[attitude.neutral == 1 & seed.pro == 1],
                            moderator_variable
                            ),
        fill = TRUE
    )

}

# very little significant heterogeneity even before multiple testing correction
# out of 16 preregistered tests (20 with the added college moderator),
# 1 has p value of .013
print('heterogeneity results before multiple correction:')
print(interaction_results[`Pr(>|t|)` < .05,])
# does not survive a BH correction
interaction_results[, p.adj := p.adjust(`Pr(>|t|)`, 'BH')]
print('heterogeneity p-values after multiple correction:')
print(interaction_results[, p.adj])

## updated 4 jun 2024 at request of reviewer
## rename columns via a lookup vector keyed by the current column names
colnames(interaction_results) <- c(
    subset = 'subset',
    interaction = 'interaction',
    Estimate = 'est',
    `Std. Error` = 'se',
    `t value` = 't',
    `Pr(>|t|)` = 'p',
    p.adj = 'p.adj'
)[colnames(interaction_results)]
fwrite(interaction_results,
       '../results/intermediate data/minimum wage (issue 2)/heterogeneity_basecontrol_yg.csv'
       )
1024
+
1025
+
1026
+
1027
###############################################
## added 30 sep 2024 at request of reviewers ##
###############################################

## what are minimum detectable effects, given multiple testing correction?

n_sims <- 1000
## grid of (seed, hypothesized effect) pairs for the simulations below
params_sims <- expand.grid(seed = 19104 + 0:(n_sims - 1),
                           effect = seq(from = .01, to = .05, by = .001)
                           )

## step 1: identify largest p-value s.t. we would have rejected layer-1 null
## (that at least one treatment contrast has effect on policy index)
layer1.pvals.mde <- layer1.pvals
layer1.pvals.mde['mwpolicy'] <- 0
## walk the mwpolicy p-value up in .001 steps until BH no longer rejects;
## terminates because the BH-adjusted p-value is bounded below by the raw one
while (p.adjust(layer1.pvals.mde, 'BH')['mwpolicy'] <= .05){
    layer1.pvals.mde['mwpolicy'] <- layer1.pvals.mde['mwpolicy'] + .001
}
pval.cutoff <- layer1.pvals.mde['mwpolicy']
print('to achieve significance of policy attitude family at layer 1 (pooled test of any effect on policy index from any contrast) when correcting for multiple layer-1 hypothesis families, this is the minimum cutoff value after conducting simes correction of layer 2 pvals:')
print(pval.cutoff)

## if layer-1 null was rejected for the policy outcome, then we would use this
## correction factor when interpreting layer-2 p-values (for specific contrasts)
layer1.nonnull.prop.if.gt.cutoff <- mean(c(
    p.adjust(layer1.pvals.mde, 'BH')[c('platform', 'media', 'affpol')] < .05,
    TRUE
))

## the sims below will only examine 3/1 vs 2/2 treatment contrasts, so we will
## hold fixed the layer-2 p-values that relate to seed contrasts.
## fix: layer-2 names are paste(treat, ctrl, sep = '.vs.'), and for contrasts
## v-vi the treatment is the ANTI seed (order reversed from study 1 — see the
## contrasts definition above); the previous 'pro...vs...anti' names did not
## exist in layer2.pvals$mwpolicy and silently returned NA
pvals.for.seed.contrasts.on.policyindex <- layer2.pvals$mwpolicy[
    c('attitude.neutral:seed.anti:recsys.31.vs.attitude.neutral:seed.pro:recsys.31',
      'attitude.neutral:seed.anti:recsys.22.vs.attitude.neutral:seed.pro:recsys.22'
      )
]
## guard against any future name drift producing silent NAs
assert_that(!any(is.na(pvals.for.seed.contrasts.on.policyindex)))
1063
+
1064
+
1065
+
1066
## step 2: prepare simulations based on real data ------------------------------

## we will create simulated outcomes, given hypothesized treatment effect
## == intercept + <-- part A
## real coef * real pretreatment attitude + <-- part A
## hypothesized treatment effect * real treatment status + <-- part B
## rnorm(mean = 0, sd = real residual outcome sd) <-- part C

## helper: for one experimental subset, fit the primary-specification model
## mw_index_w2 ~ recsys.31 + mw_index_w1 and return the pieces needed to
## simulate outcomes under a hypothesized recsys.31 effect:
## - X: model matrix (real covariates and treatment status)
## - residual.sd: sd of the real residuals (part C above)
## - Y.baseline: fitted values with the recsys.31 coefficient zeroed (part A)
prep.sim.inputs <- function(dsub){
    mod <- lm(mw_index_w2 ~ recsys.31 + mw_index_w1, data = dsub)
    X <- model.matrix(mod)
    ## confirm that the model matrix recovers fitted values
    assert_that(all(predict(mod) == X %*% coef(mod)))
    beta.baseline <- coef(mod)
    beta.baseline['recsys.31'] <- 0
    list(X = X,
         residual.sd = sd(resid(mod)),
         Y.baseline = as.numeric(X %*% beta.baseline)
         )
}

## respondents with anti attitude
sim.inputs <- prep.sim.inputs(d[attitude.anti == 1])
X.attitude.anti <- sim.inputs$X
residual.sd.attitude.anti <- sim.inputs$residual.sd
Y.attitude.anti.baseline <- sim.inputs$Y.baseline

## respondents with pro attitude
sim.inputs <- prep.sim.inputs(d[attitude.pro == 1])
X.attitude.pro <- sim.inputs$X
residual.sd.attitude.pro <- sim.inputs$residual.sd
Y.attitude.pro.baseline <- sim.inputs$Y.baseline

## respondents with neutral attitude assigned to pro seed
sim.inputs <- prep.sim.inputs(d[attitude.neutral == 1 & seed.pro == 1])
X.attitude.neutral.seed.pro <- sim.inputs$X
residual.sd.attitude.neutral.seed.pro <- sim.inputs$residual.sd
Y.attitude.neutral.seed.pro.baseline <- sim.inputs$Y.baseline

## respondents with neutral attitude assigned to anti seed
sim.inputs <- prep.sim.inputs(d[attitude.neutral == 1 & seed.anti == 1])
X.attitude.neutral.seed.anti <- sim.inputs$X
residual.sd.attitude.neutral.seed.anti <- sim.inputs$residual.sd
Y.attitude.neutral.seed.anti.baseline <- sim.inputs$Y.baseline

rm(sim.inputs)
1128
+
1129
+
1130
+
1131
## step 3: conduct sims --------------------------------------------------------
## for each (seed, effect) pair: simulate outcomes = baseline fitted values +
## hypothesized effect * treatment + gaussian noise at the real residual sd,
## refit with robust SEs, and record the recsys.31 test.
## NOTE: each sim regresses on the model matrix directly (Y ~ 0 + X...), so
## the coefficient row name is '<matrix variable name>recsys.31' — the
## row-name strings below are coupled to the variable names and must change
## together.

sims.attitude.anti <- foreach(seed = params_sims$seed,
                              effect = params_sims$effect,
                              .combine = rbind
                              ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.anti.baseline +
            effect * X.attitude.anti[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.anti),
                mean = 0,
                sd = residual.sd.attitude.anti
            )
        mod <- lm(Y ~ 0 + X.attitude.anti)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.antirecsys.31', , drop = FALSE])
        )
    }

sims.attitude.pro <- foreach(seed = params_sims$seed,
                             effect = params_sims$effect,
                             .combine = rbind
                             ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.pro.baseline +
            effect * X.attitude.pro[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.pro),
                mean = 0,
                sd = residual.sd.attitude.pro
            )
        mod <- lm(Y ~ 0 + X.attitude.pro)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.prorecsys.31', , drop = FALSE])
        )
    }

sims.attitude.neutral.seed.anti <- foreach(seed = params_sims$seed,
                                           effect = params_sims$effect,
                                           .combine = rbind
                                           ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.neutral.seed.anti.baseline +
            effect * X.attitude.neutral.seed.anti[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.neutral.seed.anti),
                mean = 0,
                sd = residual.sd.attitude.neutral.seed.anti
            )
        mod <- lm(Y ~ 0 + X.attitude.neutral.seed.anti)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.neutral.seed.antirecsys.31', , drop = FALSE])
        )
    }

sims.attitude.neutral.seed.pro <- foreach(seed = params_sims$seed,
                                          effect = params_sims$effect,
                                          .combine = rbind
                                          ) %dopar%
    {
        set.seed(seed)
        Y <-
            Y.attitude.neutral.seed.pro.baseline +
            effect * X.attitude.neutral.seed.pro[, 'recsys.31'] +
            rnorm(
                n = nrow(X.attitude.neutral.seed.pro),
                mean = 0,
                sd = residual.sd.attitude.neutral.seed.pro
            )
        mod <- lm(Y ~ 0 + X.attitude.neutral.seed.pro)
        smry <- coeftest(mod, vcovHC(mod))
        cbind(
            seed,
            effect,
            data.table(smry['X.attitude.neutral.seed.prorecsys.31', , drop = FALSE])
        )
    }
1224
+
1225
+
1226
+
1227
+ ## step 4: analyze power results -----------------------------------------------
1228
+
1229
+ ## without multiple-testing corrections
1230
+
1231
+ print('mde for respondents with anti attitude (conventional analysis w/o correction):')
1232
+ sims.attitude.anti[,
1233
+ .(p.reject = mean(`Pr(>|t|)` < .05)),
1234
+ by = effect
1235
+ ][p.reject >= .8, min(effect)]
1236
+
1237
+ print('mde for respondents with pro attitude (conventional analysis w/o correction):')
1238
+ sims.attitude.pro[,
1239
+ .(p.reject = mean(`Pr(>|t|)` < .05)),
1240
+ by = effect
1241
+ ][p.reject >= .8, min(effect)]
1242
+
1243
+ print('mde for respondents with neutral attitude assigned to pro seed (conventional analysis w/o correction):')
1244
+ sims.attitude.neutral.seed.anti[,
1245
+ .(p.reject = mean(`Pr(>|t|)` < .05)),
1246
+ by = effect
1247
+ ][p.reject >= .8, min(effect)]
1248
+
1249
+ print('mde for respondents with neutral attitude assigned to anti seed (conventional analysis w/o correction):')
1250
+ sims.attitude.neutral.seed.pro[,
1251
+ .(p.reject = mean(`Pr(>|t|)` < .05)),
1252
+ by = effect
1253
+ ][p.reject >= .8, min(effect)]
1254
+
1255
+
1256
+
1257
+ ## with multiple testing correction
1258
+
1259
+ sims <- rbind(
1260
+ sims.attitude.anti,
1261
+ sims.attitude.pro,
1262
+ sims.attitude.neutral.seed.anti,
1263
+ sims.attitude.neutral.seed.pro
1264
+ )
1265
+
1266
+ sims.layer1 <- sims[
1267
+ ,
1268
+ .(pval.pooled = ifelse(
1269
+ ## if these results would lead us to reject layer-1 pooled null of no effect
1270
+ ## on policy attitudes from any treatment contrast
1271
+ simes(c(
1272
+ `Pr(>|t|)`,
1273
+ pvals.for.seed.contrasts.on.policyindex
1274
+ )) <= pval.cutoff,
1275
+ ## disaggregate layer-2 results report with procedure from above
1276
+ ## (BH correction, then inflate by 1/prop of layer-1 sig results)
1277
+ ## then subset to only those p-values relating to 3/1 vs 2/2 contrast
1278
+ ## to see if any are <.05 after full correction procedure
1279
+ yes = min(
1280
+ p.adjust(c(`Pr(>|t|)`, pvals.for.seed.contrasts.on.policyindex),
1281
+ 'BH'
1282
+ )[1:4] / layer1.nonnull.prop.if.gt.cutoff
1283
+ ),
1284
+ no = Inf
1285
+ )
1286
+ ),
1287
+ by = .(seed, effect)
1288
+ ]
1289
+ print('with multiple testing correction:')
1290
+ sims.layer1[, .(p.reject = mean(pval.pooled <= pval.cutoff)), by = effect]
1291
+ print('mde:')
1292
+ sims.layer1[,
1293
+ .(p.reject = mean(pval.pooled <= pval.cutoff)),
1294
+ by = effect
1295
+ ][p.reject >= .8, min(effect)]
code/run ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -ex
3
+
4
+ ## Study 1
5
+ Rscript 'gun control (issue 1)/01_trt_assign.R'
6
+ Rscript 'gun control (issue 1)/02_clean_merge.R'
7
+ Rscript 'gun control (issue 1)/03_analysis_multipletesting.R'
8
+
9
+ ## Study 2
10
+ Rscript 'minimum wage (issue 2)/01_trt_assign.R' # also covers study 3
11
+ Rscript 'minimum wage (issue 2)/02_clean_merge.R'
12
+ Rscript 'minimum wage (issue 2)/03_analysis_multipletesting.R'
13
+
14
+ ## Study 3 (YouGov)
15
+ Rscript 'minimum wage (issue 2)/02b_clean_merge_yg.R'
16
+ Rscript 'minimum wage (issue 2)/03b_analysis_multipletesting_yg.R'
17
+
18
+ ## Combined results from Studies 1-3
19
+ Rscript '04_postprocessing_exploration_issues12.R'
20
+
21
+ ## Study 4
22
+ Rscript 'shorts/05_clean_shorts_data.R' # returns demographics for the shorts (SI Fig1)
23
+ Rscript 'shorts/06_analysis_multipletesting.R'
24
+ Rscript 'shorts/07_postprocessing_exploration.R' # -- may not need the figure plotting section
25
+ Rscript 'shorts/08_plot_shorts_figure.R' # returns the main figure for the Shorts exp
26
+
27
+ ## Supplementary analyses
28
+
29
+ # (1) Experiment durations
30
+ # SI Fig S2 comes from this code.
31
+ python3 'supplemental/experiment durations/09_experiment_times.py'
32
+
33
+ # (2) Increasingly extreme recommendations
34
+ # SI Table S11, Fig S15 comes from this code
35
+ python3 'supplemental/increasingly extreme recommendations/10_partisanship_increase.py'
36
+
37
+ # SI Fig S12-13 comes from this code
38
+ python3 'supplemental/increasingly extreme recommendations/11_gpt_rating_plots.py'
39
+
40
+ # (3) Thumbnail ("First Impressions") analysis
41
+ python3 'supplemental/thumbnails (first impressions)/12_thumbnail_analysis.py'
42
+ python3 'supplemental/thumbnails (first impressions)/13_thumbnail_null_comparison.py'
43
+
44
+ # SI Fig S3 comes from this code.
45
+ Rscript 'supplemental/14_api_browser_comparison.R'
code/shorts/05_clean_shorts_data.R ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cat(rep('=', 80),
2
+ '\n\n',
3
+ 'OUTPUT FROM: shorts/05_clean_shorts_data.R',
4
+ '\n\n',
5
+ sep = ''
6
+ )
7
+
8
+ ## Extremizing Sequences and Minimum Wage Opinions
9
+ ## Data collected May 2024 via MTurk/CloudResearch
10
+ ## Analysis for the Extremizing Sequences Experiment
11
+
12
+ ## Preamble ----------------------------
13
+ library(tidyverse)
14
+ library(janitor)
15
+ library(lubridate)
16
+ library(stargazer)
17
+ library(broom)
18
+
19
+ # create a folder for the shorts intermediate data
20
+ dir.create("../results/intermediate data/shorts/", recursive = TRUE, showWarnings = FALSE)
21
+
22
+ # SURVEY DATA (FROM QUALTRICS)
23
+ a <- read_csv("../data/shorts/ytrecs_surveys_may2024.csv")[-c(1,2),] %>%
24
+ clean_names() # 1315 obs.
25
+
26
+ # DATE FILTER
27
+ a <- a %>% filter(start_date >= '2024-05-28') # 1032 obs.
28
+
29
+ # ATTENTION CHECK -- 932 obs.
30
+ a <- a %>% filter(a$q81 == "Quick and easy")
31
+ a <- a %>% filter(a$q82 == "wikiHow")
32
+ a <- a %>% filter(is.na(video_link) == FALSE) ## failed respondents don't have a valid link
33
+
34
+ # SURVEY TIME (ALL)
35
+ a <- a %>% mutate(start_date = as_datetime(start_date),
36
+ end_date = as_datetime(end_date),
37
+ survey_time = as.numeric(end_date-start_date))
38
+
39
+ summary(a$survey_time) # 5.5 mins to 74 mins (median 34 mins)
40
+
41
+ # DEMOGRAPHICS -------------------------------------------------
42
+
43
+ # GENDER, EDUCATION, INCOME
44
+ a <- a %>%
45
+ mutate(female = ifelse(gender == "Woman", 1, 0),
46
+ male = ifelse(gender == "Man", 1, 0),
47
+ black = ifelse(str_detect(race_ethnicity, "Black"), 1, 0),
48
+ white = ifelse(str_detect(race_ethnicity, "White"), 1, 0),
49
+ college = ifelse(str_detect(highest_education, "college ") | str_detect(highest_education, "Post"), 1, 0),
50
+ income_gt50k = ifelse(income %in% names(table(a$income))[c(2,3,5,10,11,12,13)], 1, 0)
51
+ )
52
+ a$income_gt50k[is.na(a$income)] <- NA
53
+
54
+ # PID
55
+ a <- a %>%
56
+ mutate(pid = case_when(pid1=="Democrat" ~ -1,
57
+ pid1=="Republican" ~ 1,
58
+ pid4=="Closer to the Republican Party" ~ 1,
59
+ pid4=="Closer to the Democratic Party" ~ -1,
60
+ pid4=="Neither" ~ 0))
61
+
62
+ tabyl(a,pid)
63
+
64
+ # IDEO
65
+ a <- a %>%
66
+ mutate(ideo = case_when(ideo1=="Liberal" ~ -1,
67
+ ideo1=="Conservative" ~ 1,
68
+ ideo4=="Closer to conservatives" ~ 1,
69
+ ideo4=="Closer to liberals" ~ -1,
70
+ ideo4=="Neither" ~ 0))
71
+
72
+ tabyl(a,ideo)
73
+
74
+ # AGE
75
+ a$age <- 2024-as.numeric(a$year_born)
76
+
77
+ # AGE CATEGORIES: 18-29; 30-44; 45-64; 65+
78
+ a <- a %>%
79
+ mutate(age_cat = case_when(age>=18 & age<=29 ~ "18-29",
80
+ age>=30 & age<=44 ~ "30-44",
81
+ age>=45 & age<=64 ~ "45-64",
82
+ age>=65 ~ "65+"
83
+ ))
84
+ a <- a %>%
85
+ fastDummies::dummy_cols(select_columns = "age_cat",remove_selected_columns = F)
86
+
87
+ # POLITICAL INTEREST AND YOUTUBE FREQUENCY RECODING
88
+ a <- a %>%
89
+ mutate(pol_interest = dplyr::recode(political_interest,"Extremely interested"=5,"Very interested"=4,"Somewhat interested"=3,"Not very interested"=2,"Not at all interested"=1),
90
+ freq_youtube = dplyr::recode(youtube_time,"More than 3 hours per day"=6,"2–3 hours per day"=5,"1–2 hours per day"=4,"31–59 minutes per day"=3,"10–30 minutes per day"=2,"Less than 10 minutes per day"=1,"None"=0)
91
+ )
92
+
93
+ # SUMMARY TABLE FOR DEMOGRAPHICS
94
+ summary_tab <- a %>%
95
+ dplyr::summarize(female = mean(female,na.rm=T),
96
+ white = mean(white,na.rm=T),
97
+ black = mean(black,na.rm=T),
98
+ age1829 = mean(`age_cat_18-29`,na.rm=T),
99
+ age3044 = mean(`age_cat_30-44`,na.rm=T),
100
+ age4564 = mean(`age_cat_45-64`,na.rm=T),
101
+ age65p = mean(`age_cat_65+`,na.rm=T),
102
+ college = mean(college,na.rm=T),
103
+ income_gt50k = mean(income_gt50k,na.rm=T),
104
+ democrat = mean(pid==-1,na.rm=T),
105
+ republican = mean(pid==1,na.rm=T))
106
+
107
+ summary_tab <- pivot_longer(summary_tab,
108
+ cols=c(female,
109
+ white,
110
+ black,
111
+ age1829,
112
+ age3044,
113
+ age4564,
114
+ age65p,
115
+ college,
116
+ income_gt50k,
117
+ democrat,
118
+ republican),
119
+ names_to = "outcome",values_to = "survey_avg")
120
+ outcome_labels <- data.frame(outcome_pretty = c("Female",
121
+ "White",
122
+ "Black",
123
+ "Age 18-29",
124
+ "Age 30-44",
125
+ "Age 45-64",
126
+ "Age 65+",
127
+ "College educated",
128
+ "Income >$50k",
129
+ "Democrat",
130
+ "Republican"),
131
+ outcome = c("female",
132
+ "white",
133
+ "black",
134
+ "age1829",
135
+ "age3044",
136
+ "age4564",
137
+ "age65p",
138
+ "college",
139
+ "income_gt50k",
140
+ "democrat",
141
+ "republican"))
142
+
143
+ summary_tab$outcome_pretty <- outcome_labels$outcome_pretty[match(summary_tab$outcome,outcome_labels$outcome)]
144
+ summary_tab <- summary_tab %>%
145
+ mutate(outcome_pretty = factor(outcome_pretty,levels = c("Republican",
146
+ "Democrat",
147
+ "Income >$50k",
148
+ "College educated",
149
+ "Age 65+",
150
+ "Age 45-64",
151
+ "Age 30-44",
152
+ "Age 18-29",
153
+ "Female",
154
+ "Black",
155
+ "White"),ordered=T))
156
+
157
+ # DEMOGRAPHICS DESCRIPTIVE FIGURE
158
+ (descrip_fig <- ggplot(summary_tab) +
159
+ geom_point(aes(y=outcome_pretty,x=survey_avg)) +
160
+ geom_text(aes(y=outcome_pretty,x=survey_avg,label=paste0(round(100*survey_avg,0),"%")),nudge_x = 0.1) +
161
+ scale_y_discrete("") +
162
+ scale_x_continuous("",labels=scales::percent_format(),limits=c(0,1)) +
163
+ theme_bw()
164
+ )
165
+ ggsave(descrip_fig,filename = "../results/shorts_demographics.pdf",height=5,width=4)
166
+
167
+
168
+ ### DEMOGRAPHICS DONE ###
169
+
170
+ #### OUTCOMES ####
171
+
172
+ ##### POLICY OPINIONS #####
173
+
174
+ # convert to numeric unit scale:
175
+ a <- a %>%
176
+ mutate( # higher = more conservative or anti-min wage
177
+ minwage15_pre = dplyr::recode(minwage15_pre,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
178
+ rtwa_v1_pre = dplyr::recode(rtwa_v1_pre, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
179
+ rtwa_v2_pre = dplyr::recode(rtwa_v2_pre, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
180
+ mw_support_pre = dplyr::recode(mw_support_pre, "Strongly oppose raising the minimum wage"=4,"Somewhat oppose raising the minimum wage"=3,"Neither support nor oppose raising the minimum wage"=2,"Somewhat support raising the minimum wage"=1,"Strongly support raising the minimum wage"=0)/4,
181
+ minwage_howhigh_pre = dplyr::recode(minwage_howhigh_pre, "Much lower than the current level"=4,"Somewhat lower than the current level"=3,"About the current level"=2,"Somewhat higher than the current level"=1,"Much higher than the current level"=0)/4,
182
+ mw_help_pre_1 = dplyr::recode(mw_help_pre_1, "10"=9,"9"=8,"8"=7,"7"=6,"6"=5,"5"=4,"4"=3,"3"=2,"2"=1,"1"=0)/9,
183
+ mw_restrict_pre_1 = dplyr::recode(mw_restrict_pre_1, "1"=9,"2"=8,"3"=7,"4"=6,"5"=5,"6"=4,"7"=3,"8"=2,"9"=1,"10"=0)/9,
184
+ minwage_text_r_pre = (25-as.numeric(minwage_text_pre))/25,
185
+ )
186
+ a$minwage_text_r_pre[as.numeric(a$minwage_text_pre)>25] <- NA
187
+
188
+
189
+ a <- a %>%
190
+ rowwise() %>%
191
+ mutate(mw_index_pre = mean(c(minwage15_pre, rtwa_v1_pre,
192
+ rtwa_v2_pre, mw_support_pre,
193
+ minwage_howhigh_pre, mw_help_pre_1,
194
+ mw_restrict_pre_1, minwage_text_r_pre), na.rm=T)) %>%
195
+ ungroup()
196
+
197
+
198
+ # CRONBACH'S ALPHA
199
+ index_fa <- psych::alpha(select(a, minwage15_pre, rtwa_v1_pre,
200
+ rtwa_v2_pre, mw_support_pre, minwage_howhigh_pre,
201
+ mw_help_pre_1, mw_restrict_pre_1, minwage_text_r_pre), check.keys = TRUE)
202
+
203
+ write.csv(data.frame(cor(select(a, minwage15_pre, rtwa_v1_pre, rtwa_v2_pre,
204
+ mw_support_pre, minwage_howhigh_pre, mw_help_pre_1,
205
+ mw_restrict_pre_1, minwage_text_r_pre), use = "complete.obs")),
206
+ row.names = T,file = "../results/cormat_mwindex_w1.csv")
207
+
208
+ # CORRELATION PLOT PRE-MINIMUM WAGE OPINION
209
+ pdf("corrplot_mwindex_w1.pdf")
210
+ w1_corrplot <- corrplot::corrplot(cor(select(a, minwage15_pre, rtwa_v1_pre, rtwa_v2_pre,
211
+ mw_support_pre, minwage_howhigh_pre, mw_help_pre_1,
212
+ mw_restrict_pre_1, minwage_text_r_pre),
213
+ use = "complete.obs"),method = "shade")
214
+ dev.off()
215
+
216
+ (alpha <- index_fa$total["raw_alpha"]) # 0.9407615
217
+ writeLines(as.character(round(alpha,2)),con = "../results/outcomes_alpha_w1_mturk.tex",sep = "%")
218
+
219
+ tabyl(a,mw_index_pre)
220
+
221
+ ##### MEDIA TRUST #####
222
+ a <- a %>%
223
+ mutate( # higher = more trusting
224
+ trust_majornews = dplyr::recode(info_trust_1,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
225
+ trust_localnews = dplyr::recode(info_trust_2,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
226
+ trust_social = dplyr::recode(info_trust_3,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
227
+ trust_youtube = dplyr::recode(info_trust_4,"A lot"=3,"Some"=2,"Not too much"=1,"Not at all"=0)/3,
228
+ fabricate_majornews = dplyr::recode(mainstream_fakenews,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4,
229
+ fabricate_youtube = dplyr::recode(youtube_fakenews,"Never"=4,"Once in a while"=3,"About half the time"=2,"Most of the time"=1,"All the time"=0)/4
230
+ ) %>%
231
+ rowwise() %>%
232
+ mutate(media_trust = mean(trust_majornews,trust_localnews,fabricate_majornews,na.rm=T)) %>%
233
+ ungroup()
234
+
235
+ media_trust_fa <- psych::alpha(select(a, trust_majornews,trust_localnews,fabricate_majornews),
236
+ check.keys = TRUE)
237
+ (alpha <- media_trust_fa$total["raw_alpha"]) #. 0.7698292
238
+
239
+ ##### AFFECTIVE POLARIZATION #####
240
+ a %>%
241
+ group_by(pid) %>%
242
+ summarize(mean_2=mean(as.numeric(political_lead_feels_2),na.rm=T), # Trump
243
+ mean_5=mean(as.numeric(political_lead_feels_5),na.rm=T), # Biden
244
+ mean_11=mean(as.numeric(political_lead_feels_11),na.rm=T), # dems
245
+ mean_12=mean(as.numeric(political_lead_feels_12),na.rm=T)) # reps
246
+
247
+ a <- a %>%
248
+ mutate( # higher = more trusting
249
+ smart_dems = dplyr::recode(democrat_smart, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
250
+ smart_reps = dplyr::recode(republican_smart, "Extremely"=4,"Very"=3,"Somewhat"=2,"A little"=1,"Not at all"=0)/4,
251
+ comfort_dems = dplyr::recode(democrat_friends,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
252
+ comfort_reps = dplyr::recode(republican_friends,"Extremely comfortable"=3,"Somewhat comfortable"=2,"Not too comfortable"=1,"Not at all comfortable"=0)/3,
253
+ affpol_smart = case_when(
254
+ pid==-1 ~ smart_dems-smart_reps,
255
+ pid==1 ~ smart_reps-smart_dems
256
+ ),
257
+ affpol_comfort = case_when(
258
+ pid==-1 ~ comfort_dems-comfort_reps,
259
+ pid==1 ~ comfort_reps-comfort_dems
260
+ )
261
+ )
262
+
263
+ # Create a new variable 'thirds' based on attributes
264
+ a$thirds <- ifelse(!is.na(a$liberals_do) & is.na(a$moderates_do) & is.na(a$conservatives_do), 1,
265
+ ifelse(is.na(a$liberals_do) & !is.na(a$moderates_do) & is.na(a$conservatives_do), 2,
266
+ ifelse(is.na(a$liberals_do) & is.na(a$moderates_do) & !is.na(a$conservatives_do), 3, NA)))
267
+
268
+ tabyl(a$thirds)
269
+
270
+ #### OUTCOMES ####
271
+
272
+ ##### POLICY OPINIONS ######
273
+ # convert to numeric unit scale:
274
+ a <- a %>%
275
+ mutate( # higher = more pro-gun
276
+ minwage15 = dplyr::recode(minwage15,"Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
277
+ rtwa_v1 = dplyr::recode(rtwa_v1_updated, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
278
+ rtwa_v2 = dplyr::recode(rtwa_v2_updated, "Strongly oppose"=4,"Somewhat oppose"=3,"Neither support nor oppose"=2,"Somewhat support"=1,"Strongly support"=0)/4,
279
+ mw_support = dplyr::recode(mw_support, "Strongly oppose raising the minimum wage"=4,"Somewhat oppose raising the minimum wage"=3,"Neither support nor oppose raising the minimum wage"=2,"Somewhat support raising the minimum wage"=1,"Strongly support raising the minimum wage"=0)/4,
280
+ minwage_howhigh = dplyr::recode(minwage_howhigh, "Much lower than the current level"=4,"Somewhat lower than the current level"=3,"About the current level"=2,"Somewhat higher than the current level"=1,"Much higher than the current level"=0)/4,
281
+ mw_help_1 = dplyr::recode(mw_help_1, "10"=9,"9"=8,"8"=7,"7"=6,"6"=5,"5"=4,"4"=3,"3"=2,"2"=1,"1"=0)/9,
282
+ mw_restrict_1 = dplyr::recode(mw_restrict_1, "1"=9,"2"=8,"3"=7,"4"=6,"5"=5,"6"=4,"7"=3,"8"=2,"9"=1,"10"=0)/9,
283
+ minwage_text_r = (25-as.numeric(minwage_text))/25,
284
+ )
285
+ a$minwage_text_r[as.numeric(a$minwage_text)>25] <- NA
286
+
287
+ a <- a %>%
288
+ rowwise() %>%
289
+ mutate(mw_index = mean(c(minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh,
290
+ mw_help_1, mw_restrict_1, minwage_text_r), na.rm=T)) %>%
291
+ ungroup()
292
+
293
+ # CRONBACH-S ALPHA
294
+ index_fa <- psych::alpha(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh,
295
+ mw_help_1, mw_restrict_1, minwage_text_r), check.keys = T)
296
+
297
+ write.csv(data.frame(cor(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh,
298
+ mw_help_1, mw_restrict_1, minwage_text_r), use = "complete.obs")),
299
+ row.names = T,file = "../results/cormat_mw_index_w2.csv")
300
+
301
+ pdf("corrplot_mwindex_w2.pdf")
302
+ a_corrplot <- corrplot::corrplot(cor(select(a, minwage15, rtwa_v1, rtwa_v2, mw_support,
303
+ minwage_howhigh, mw_help_1, mw_restrict_1, minwage_text_r),
304
+ use = "complete.obs"),method = "shade")
305
+ dev.off()
306
+
307
+ (alpha <- index_fa$total["raw_alpha"]) # 0.9582061
308
+
309
+ ### SURVEY PREPROCESSING DONE ###
310
+
311
+ ## YTRECS SESSION DATA -------------------------------------------------------
312
+ ytrecs <- read_rds("../data/shorts/ytrecs_sessions_may2024.rds") %>%
313
+ clean_names() %>%
314
+ as_tibble()
315
+
316
+ ## EXTRACTING TOPICID AND URLID
317
+ a <- a %>%
318
+ ungroup() %>%
319
+ mutate(
320
+ topic_id = str_extract(video_link, "topicid=([a-z]{2}[1-6])") %>% str_replace("topicid=", ""),
321
+ urlid = str_extract(video_link, "id=(mt_\\d+)") %>% str_replace("id=", "")
322
+ )
323
+
324
+ ## USING THE FIRST SESSION AS THE VALID ONE IF A PERSON HAS MULTIPLE ATTEMPTS
325
+ ytrecs <- ytrecs %>%
326
+ group_by(topic_id, urlid) %>%
327
+ mutate(dupes = n(),
328
+ first_session = ifelse(row_number() == 1, 1, 0)
329
+ ) %>%
330
+ filter(first_session == 1) # using the first session as valid one
331
+
332
+ a <- left_join(a, ytrecs,by=c("topic_id","urlid"))
333
+
334
+ ## EXTRACTING TREATMENT ARM
335
+ extract_treatmentarm <- function(url) {
336
+ pattern <- "topicid=([a-z]{2})" #[a-z]{2}[1-6]
337
+ match <- str_match(url, pattern)
338
+ if (!is.na(match[2])) {
339
+ return(match[2])
340
+ } else {
341
+ return(NA)
342
+ }
343
+ }
344
+
345
+ # APPLY THE FUNCTION TO THE VIDEO_LINK COLUMN
346
+ a <- a %>%
347
+ rowwise() %>%
348
+ mutate(treatment_arm = extract_treatmentarm(video_link)) %>%
349
+ ungroup()
350
+
351
+ write_csv(a, "../results/intermediate data/shorts/qualtrics_w12_clean_ytrecs_may2024.csv")
352
+ rm(list = ls())
353
+
354
+ ### PREPROCESSING DONE ----------------------
code/shorts/06_analysis_multipletesting.R ADDED
@@ -0,0 +1,667 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cat(rep('=', 80),
2
+ '\n\n',
3
+ 'OUTPUT FROM: shorts/06_analysis_multipletesting.R',
4
+ '\n\n',
5
+ sep = ''
6
+ )
7
+
8
+ library(data.table)
9
+ library(car)
10
+ library(sandwich)
11
+ library(lmtest)
12
+ library(ggplot2)
13
+ library(tidyverse)
14
+
15
+ ###############
16
+ ## functions ##
17
+ ###############
18
+
19
+ `%.%` <- paste0
20
+
21
+ simes <- function(ps){
22
+ min(sort(length(ps) * ps / rank(ps)))
23
+ }
24
+
25
+ ### functions to handle inconsistent interaction ordering of mlm() ###
26
+
27
+ ## convert interaction terms of form 'b#:a#' to 'a#:b#'
28
+ reorder.interaction.names <- function(x, prefix = ''){
29
+ x <- gsub('^' %.% prefix, '', x)
30
+ sapply(strsplit(x, ':'),
31
+ function(y){
32
+ paste(sort(y), collapse = ':')
33
+ })
34
+ }
35
+
36
+ ## take term of form 'a1:b1', look up in vector of form 'b#:a#, return 'b1:a1'
37
+ convert.interaction.names <- function(x, y, prefix.y = ''){
38
+ ind <- match(reorder.interaction.names(x),
39
+ reorder.interaction.names(y, prefix = prefix.y)
40
+ )
41
+ return(y[ind])
42
+ }
43
+
44
+ ## modified from print.linearHypothesis.mlm to use alternate df & return pvals
45
+ ## (print method is responsible for doing the actual computation of pvals)
46
+ extract.lht <- function(x,
47
+ SSP = TRUE,
48
+ SSPE = SSP,
49
+ digits = getOption('digits'),
50
+ df.residual = x$df.residual
51
+ ){
52
+ test <- x$test
53
+ if (!is.null(x$P) && SSP) {
54
+ P <- x$P
55
+ cat("\n Response transformation matrix:\n")
56
+ attr(P, "assign") <- NULL
57
+ attr(P, "contrasts") <- NULL
58
+ print(P, digits = digits)
59
+ }
60
+ if (SSP) {
61
+ cat("\nSum of squares and products for the hypothesis:\n")
62
+ print(x$SSPH, digits = digits)
63
+ }
64
+ if (SSPE) {
65
+ cat("\nSum of squares and products for error:\n")
66
+ print(x$SSPE, digits = digits)
67
+ }
68
+ if ((!is.null(x$singular)) && x$singular) {
69
+ warning("the error SSP matrix is singular; multivariate tests are unavailable")
70
+ return(invisible(x))
71
+ }
72
+ SSPE.qr <- qr(x$SSPE)
73
+ eigs <- Re(eigen(qr.coef(SSPE.qr, x$SSPH), symmetric = FALSE)$values)
74
+ tests <- matrix(NA, 4, 4)
75
+ rownames(tests) <- c("Pillai", "Wilks", "Hotelling-Lawley",
76
+ "Roy")
77
+ if ("Pillai" %in% test)
78
+ tests[1, 1:4] <- car:::Pillai(eigs, x$df, df.residual)
79
+ if ("Wilks" %in% test)
80
+ tests[2, 1:4] <- car:::Wilks(eigs, x$df, df.residual)
81
+ if ("Hotelling-Lawley" %in% test)
82
+ tests[3, 1:4] <- car:::HL(eigs, x$df, df.residual)
83
+ if ("Roy" %in% test)
84
+ tests[4, 1:4] <- car:::Roy(eigs, x$df, df.residual)
85
+ tests <- na.omit(tests)
86
+ ok <- tests[, 2] >= 0 & tests[, 3] > 0 & tests[, 4] > 0
87
+ ok <- !is.na(ok) & ok
88
+ tests <- cbind(x$df, tests, pf(tests[ok, 2], tests[ok, 3],
89
+ tests[ok, 4], lower.tail = FALSE))
90
+ colnames(tests) <- c("Df", "test stat", "approx F", "num Df",
91
+ "den Df", "Pr(>F)")
92
+ tests <- structure(as.data.frame(tests),
93
+ heading = paste("\nMultivariate Test",
94
+ if (nrow(tests) > 1)
95
+ "s", ": ", x$title, sep = ""),
96
+ class = c("anova",
97
+ "data.frame"
98
+ )
99
+ )
100
+ return(tests)
101
+ }
102
+
103
+ ###############
104
+ ## load data ##
105
+ ###############
106
+
107
+ d <- fread('../results/intermediate data/shorts/qualtrics_w12_clean_ytrecs_may2024.csv')
108
+
109
+ ##############
110
+ ## controls ##
111
+ ##############
112
+
113
+ platform.controls <- c('age_cat',
114
+ 'male',
115
+ 'pol_interest',
116
+ 'freq_youtube')
117
+
118
+ mwpolicy.controls <- 'mw_index_pre'
119
+
120
+ media.controls <- c('trust_majornews',
121
+ 'trust_youtube',
122
+ 'fabricate_majornews',
123
+ 'fabricate_youtube')
124
+
125
+ affpol.controls <- c('affpol_smart',
126
+ 'affpol_comfort')
127
+
128
+ controls.raw <- unique(c(platform.controls,
129
+ mwpolicy.controls,
130
+ media.controls,
131
+ affpol.controls))
132
+
133
+ ## transform control variables by creating dummies and demeaning
134
+ controls.trans <- list()
135
+ for (j in controls.raw){
136
+ ## convert to dummies if needed
137
+ controls.j <- model.matrix(as.formula('~ 0 + ' %.% j),
138
+ model.frame(as.formula('~ 0 + ' %.% j),
139
+ data = d,
140
+ na.action = 'na.pass'
141
+ )
142
+ )
143
+ ## demean by column
144
+ controls.j <- sweep(controls.j,
145
+ MARGIN = 2,
146
+ STATS = colMeans(controls.j, na.rm = TRUE),
147
+ FUN = `-`,
148
+ )
149
+ colnames(controls.j) <- make.names(colnames(controls.j))
150
+ ## remove control from original data
151
+ d[[j]] <- NULL
152
+ ## reinsert transformed control
153
+ d <- cbind(d, controls.j)
154
+ ## keep track of which original controls map to which transformed controls
155
+ controls.trans[[j]] <- colnames(controls.j)
156
+ }
157
+
158
+ ## map original control variables to transformed versions
159
+ platform.controls <- unlist(controls.trans[platform.controls])
160
+ mwpolicy.controls <- unlist(controls.trans[mwpolicy.controls])
161
+ media.controls <- unlist(controls.trans[media.controls])
162
+ affpol.controls <- unlist(controls.trans[affpol.controls])
163
+
164
+ ### Platform interactions ###
165
+ d <- d %>% filter(!is.na(interface_duration)) # -- 929 observations
166
+
167
+ ##############
168
+ ## outcomes ##
169
+ ##############
170
+
171
+ ### HYPOTHESIS FAMILY: MIN WAGE POLICY ATTITUDES ###
172
+
173
+ ## ONLY HAVE ONE OUTCOME
174
+ mwpolicy.outcomes <- 'mw_index'
175
+
176
+ outcomes <- unique(c(mwpolicy.outcomes))
177
+
178
+ ################
179
+ ## treatments ##
180
+ ################
181
+
182
+ ## CREATE ATTITUDE DUMMIES
183
+ # 1-LIBERALS, 2-MODERATES, 3-CONSERVATIVES
184
+ d[, attitude := c('pro', 'neutral', 'anti')[thirds]]
185
+ d[, attitude.pro := as.numeric(attitude == 'pro')]
186
+ d[, attitude.neutral := as.numeric(attitude == 'neutral')]
187
+ d[, attitude.anti := as.numeric(attitude == 'anti')]
188
+
189
+ ## CREATE SEQUENCE DUMMIES -- AC, PC, AI, PI
190
+ d[, recsys.ac := as.numeric(treatment_arm %like% 'ac')]
191
+ d[, recsys.pc := as.numeric(treatment_arm %like% 'pc')]
192
+ d[, recsys.ai := as.numeric(treatment_arm %like% 'ai')]
193
+ d[, recsys.pi := as.numeric(treatment_arm %like% 'pi')]
194
+
195
+ # (a) Increasing vs. Constant assignment among Pro participants;
196
+ # (b) Increasing vs. Constant assignment among Anti participants;
197
+ # (c) Increasing vs. Constant assignment among Moderate participants assigned to a Prosequence;
198
+ # (d) Increasing vs. Constant assignment among moderate participants assigned to an Antisequence;
199
+ # (e) Pro vs. Anti sequence assignment among moderate participants with Increasing assignment;
200
+ # (f) Pro vs. Anti seed among moderate participants with Constant assignment.
201
+
202
+ # Treatments:
203
+ treatments <- c('attitude.pro:recsys.pi', # (a)
204
+ 'attitude.pro:recsys.pc', # (a)
205
+ 'attitude.anti:recsys.ai', # (b)
206
+ 'attitude.anti:recsys.ac', # (b)
207
+ 'attitude.neutral:recsys.ai', # (d-e)
208
+ 'attitude.neutral:recsys.pi', # (c-e)
209
+ 'attitude.neutral:recsys.ac', # (d-f)
210
+ 'attitude.neutral:recsys.pc') # (c-f)
211
+
212
+ # Contrasts:
213
+ contrasts <- rbind(
214
+ # Increasing vs. Constant assignment among Pro participants
215
+ i = c(treat = 'attitude.pro:recsys.pi',
216
+ ctrl = 'attitude.pro:recsys.pc'
217
+ ),
218
+ # Increasing vs. Constant assignment among Anti participants
219
+ ii = c(treat = 'attitude.anti:recsys.ai',
220
+ ctrl = 'attitude.anti:recsys.ac'
221
+ ),
222
+ # Increasing vs. Constant assignment among Moderate participants assigned to a Pro sequence
223
+ iii = c(treat = 'attitude.neutral:recsys.pi',
224
+ ctrl = 'attitude.neutral:recsys.pc'
225
+ ),
226
+ # Increasing vs. Constant assignment among moderate participants assigned to an Anti sequence
227
+ iv = c(treat = 'attitude.neutral:recsys.ai',
228
+ ctrl = 'attitude.neutral:recsys.ac'
229
+ ),
230
+ # Pro vs. Anti sequence assignment among moderate participants with Increasing assignment
231
+ v = c(treat = 'attitude.neutral:recsys.ai',
232
+ ctrl = 'attitude.neutral:recsys.pi'
233
+ ),
234
+ # Pro vs. Anti sequence assignment among moderate participants with Constant assignment
235
+ vi = c(treat = 'attitude.neutral:recsys.ac',
236
+ ctrl = 'attitude.neutral:recsys.pc'
237
+ )
238
+ )
239
+
240
+ ##########################
241
+ ## hierarchical testing ##
242
+ ##########################
243
+
244
+ ## initialize top layer p-values:
245
+ ## does treatment have any effect on any outcome in family
246
+ families <- c('mwpolicy')
247
+ layer1.pvals <- rep(NA_real_, length(families))
248
+ layer1.notes <- rep('', length(families))
249
+ names(layer1.pvals) <- families
250
+
251
+ ## initialize 2nd layer p-values:
252
+ ## which treatment has detectable effect?
253
+ contrast.pvals <- rep(NA_real_, nrow(contrasts))
254
+ names(contrast.pvals) <- paste(contrasts[, 'treat'],
255
+ contrasts[, 'ctrl'],
256
+ sep = '.vs.'
257
+ )
258
+ layer2.pvals <- list( mwpolicy = contrast.pvals)
259
+ rm(contrast.pvals)
260
+
261
+ ## initialize 3rd layer p-values:
262
+ ## on which specific outcome in family?
263
+ layer3.pvals <- list()
264
+ layer3.ests <- list()
265
+ layer3.ses <- list()
266
+ layer3.notes <- list()
267
+ for (i in 1:length(families)){
268
+ family <- families[i]
269
+ layer3.pvals[[family]] <- list()
270
+ layer3.ests[[family]] <- list()
271
+ layer3.ses[[family]] <- list()
272
+ layer3.notes[[family]] <- list()
273
+ outcomes <- get(family %.% '.outcomes')
274
+ for (j in 1:nrow(contrasts)){
275
+ contrast <- paste(contrasts[j, 'treat'],
276
+ contrasts[j, 'ctrl'],
277
+ sep = '.vs.'
278
+ )
279
+ layer3.pvals[[family]][[contrast]] <- numeric(0)
280
+ layer3.ests[[family]][[contrast]] <- numeric(0)
281
+ layer3.ses[[family]][[contrast]] <- numeric(0)
282
+ for (k in 1:length(outcomes)){
283
+ outcome <- outcomes[k]
284
+ layer3.pvals[[family]][[contrast]][outcome] <- NA_real_
285
+ layer3.ests[[family]][[contrast]][outcome] <- NA_real_
286
+ layer3.ses[[family]][[contrast]][outcome] <- NA_real_
287
+ layer3.notes[[family]][outcome] <- ''
288
+ }
289
+ }
290
+ }
291
+
292
+ ### begin nested analyses ###
293
+ for (i in 1:length(families)){
294
+
295
+ family <- families[i]
296
+ family.outcomes <- get(family %.% '.outcomes')
297
+ family.controls <- get(family %.% '.controls')
298
+
299
+
300
+ family.controls.interactions <- as.character(
301
+ outer(treatments,
302
+ family.controls,
303
+ FUN = function(x, y) x %.% ':' %.% y
304
+ )
305
+ )
306
+
307
+ family.formula <-
308
+ 'cbind(' %.% # outcomes
309
+ paste(family.outcomes,
310
+ collapse = ', '
311
+ ) %.% ') ~\n0 +\n' %.%
312
+ paste(treatments, # treatments (base terms)
313
+ collapse = ' +\n'
314
+ ) %.% ' +\n' %.%
315
+ paste(family.controls, # controls (base terms)
316
+ collapse = ' +\n'
317
+ )## %.% ' +\n' %.%
318
+ ## paste( # treat-ctrl interactions
319
+ ## family.controls.interactions,
320
+ ## collapse = ' +\n'
321
+ ## )
322
+
323
+ cat(rep('=', 80),
324
+ '\n\nHYPOTHESIS FAMILY: ',
325
+ family,
326
+ '\n\nrunning mlm:\n\n',
327
+ family.formula,
328
+ '\n\n',
329
+ sep = ''
330
+ )
331
+
332
+ ## run model
333
+ family.mod <- lm(family.formula, d)
334
+
335
+ ## hack to eliminate NA coefs
336
+ if (any(is.na(coef(family.mod)))){
337
+ if ('mlm' %in% class(family.mod)){
338
+ drop <- rownames(coef(family.mod))[is.na(coef(family.mod))[, 1]]
339
+ } else {
340
+ drop <- names(coef(family.mod))[is.na(coef(family.mod))]
341
+ }
342
+ drop <- convert.interaction.names(drop,
343
+ c(family.controls,
344
+ family.controls.interactions
345
+ )
346
+ )
347
+ layer1.notes[[i]] <-
348
+ layer1.notes[[i]] %.%
349
+ 'dropped the following coefs: ' %.%
350
+ paste(drop, sep = ', ') %.%
351
+ '\n\n'
352
+ family.formula <- gsub(
353
+ '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
354
+ '',
355
+ family.formula
356
+ )
357
+ family.mod <- lm(family.formula, d)
358
+ }
359
+
360
+ family.vcov <- vcovHC(family.mod)
361
+ if (is.null(dim(coef(family.mod)))){
362
+ coef.names <- names(coef(family.mod))
363
+ } else {
364
+ coef.names <- rownames(coef(family.mod))
365
+ }
366
+
367
+ ### top layer: test overall significance of all contrasts on all outcomes ###
368
+ ## convert interaction terms to whatever mlm() named it
369
+ treats <- convert.interaction.names(contrasts[, 'treat'], coef.names)
370
+ ctrls <- convert.interaction.names(contrasts[, 'ctrl'], coef.names)
371
+
372
+ ## test jointly
373
+ lht.attempt <- tryCatch({
374
+ if ('mlm' %in% class(family.mod)){
375
+ contrast.lht <- linearHypothesis(
376
+ family.mod,
377
+ vcov. = family.vcov,
378
+ hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
379
+ rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
380
+ test = 'Pillai'
381
+ )
382
+ layer1.pvals[[i]] <- extract.lht(contrast.lht)[, 'Pr(>F)']
383
+ } else {
384
+ contrast.lht <- linearHypothesis(
385
+ family.mod,
386
+ vcov. = family.vcov,
387
+ hypothesis.matrix = sprintf('%s - %s', treats, ctrls),
388
+ rhs = matrix(0, nrow = nrow(contrasts), ncol = length(family.outcomes)),
389
+ test = 'F'
390
+ )
391
+ layer1.pvals[[i]] <- contrast.lht[['Pr(>F)']][2]
392
+ }
393
+ },
394
+ error = function(e){
395
+ warning(sprintf('caught error in %s family:', family), e)
396
+ ## return error as string for inclusion in notes
397
+ 'caught error: ' %.%
398
+ e %.%
399
+ '\n\n'
400
+ })
401
+ if (lht.attempt %like% 'caught error'){
402
+ layer1.notes[[i]] <-
403
+ layer1.notes[[i]] %.% lht.attempt
404
+ }
405
+
406
+ ### layer 2: test each contrast individually on all outcomes ###
407
+
408
+ for (j in 1:nrow(contrasts)){
409
+ ## test group equality on all outcomes
410
+ if ('mlm' %in% class(family.mod)){
411
+ contrast.lht <-
412
+ linearHypothesis(
413
+ family.mod,
414
+ vcov. = family.vcov,
415
+ hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
416
+ rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
417
+ test = 'Pillai'
418
+ )
419
+ layer2.pvals[[i]][j] <- extract.lht(contrast.lht)[, 'Pr(>F)']
420
+ } else {
421
+ contrast.lht <- linearHypothesis(
422
+ family.mod,
423
+ vcov. = family.vcov,
424
+ hypothesis.matrix = sprintf('%s - %s', treats[j], ctrls[j]),
425
+ rhs = matrix(0, nrow = 1, ncol = length(family.outcomes)),
426
+ test = 'F'
427
+ )
428
+ layer2.pvals[[i]][j] <- contrast.lht[['Pr(>F)']][2]
429
+ }
430
+ }
431
+
432
+ ### layer 3: test each contrast on each outcome individually ###
433
+
434
+ for (k in 1:length(family.outcomes)){
435
+
436
+ outcome <- family.outcomes[k]
437
+
438
+ outcome.formula <-
439
+ outcome %.% ' ~\n0 +\n' %.%
440
+ paste(treatments, # treatments (base terms)
441
+ collapse = ' +\n'
442
+ ) %.% ' +\n' %.%
443
+ paste(family.controls, # controls (base terms)
444
+ collapse = ' +\n'
445
+ )## %.% ' +\n' %.%
446
+ ## paste( # treat-ctrl interactions
447
+ ## family.controls.interactions,
448
+ ## collapse = ' +\n'
449
+ ## )
450
+
451
+ cat(rep('-', 40), '\n\nrunning lm:\n\n', outcome.formula, '\n\n', sep = '')
452
+
453
+ outcome.mod <- lm(outcome.formula, d)
454
+ ## hack to eliminate NA coefs
455
+ if (any(is.na(coef(outcome.mod)))){
456
+ drop <- names(coef(outcome.mod))[is.na(coef(outcome.mod))]
457
+ drop <- convert.interaction.names(drop,
458
+ c(family.controls,
459
+ family.controls.interactions
460
+ )
461
+ )
462
+ layer3.notes[[i]][k] <-
463
+ layer3.notes[[i]][k] %.%
464
+ 'dropped the following coefs: ' %.%
465
+ paste(drop, sep = ', ') %.%
466
+ '\n\n'
467
+ outcome.formula <- gsub(
468
+ '\\s+\\+\\s+(' %.% paste(drop, collapse = '|') %.% ')',
469
+ '',
470
+ outcome.formula
471
+ )
472
+ outcome.mod <- lm(outcome.formula, d)
473
+ }
474
+
475
+ outcome.vcov <- vcovHC(outcome.mod)
476
+ if (any(!is.finite(outcome.vcov))){
477
+ outcome.vcov <- vcov(outcome.mod)
478
+ layer3.notes[[i]][k] <-
479
+ layer3.notes[[i]][k] %.%
480
+ 'falling back to non-robust vcov\n\n'
481
+ }
482
+ coef.names <- names(coef(outcome.mod))
483
+
484
+ for (j in 1:nrow(contrasts)){
485
+
486
+ ## convert this interaction term to whatever llm() named it
487
+ treat <- convert.interaction.names(contrasts[j, 'treat'], coef.names)
488
+ ctrl <- convert.interaction.names(contrasts[j, 'ctrl'], coef.names)
489
+ ## test group equality on this outcome
490
+
491
+
492
+ contrast.lht <- linearHypothesis(
493
+ outcome.mod,
494
+ vcov. = outcome.vcov,
495
+ hypothesis.matrix = sprintf('%s - %s', treat, ctrl),
496
+ test = 'F'
497
+ )
498
+
499
+ layer3.pvals[[i]][[j]][k] <- contrast.lht[['Pr(>F)']][2]
500
+ layer3.ests[[i]][[j]][k] <- (
501
+ coef(outcome.mod)[treat] - coef(outcome.mod)[ctrl]
502
+ ) ## * attr(d[[outcome]], 'scaled:scale') # note: uncomment if rescaling
503
+ layer3.ses[[i]][[j]][k] <- sqrt(
504
+ outcome.vcov[treat, treat] +
505
+ outcome.vcov[ctrl, ctrl] -
506
+ 2 * outcome.vcov[treat, ctrl]
507
+ )
508
+ }
509
+ }
510
+ }
511
+
512
+ #################################
513
+ ## multiple testing correction ##
514
+ #################################
515
+
516
+ thresh <- .05
517
+
518
+ ## if layer-1 f-test is infeasible for a family due to collinearity,
519
+ ## obtain layer-1 p-values for that family by simes
520
+ for (i in which(is.na(layer1.pvals))){
521
+ layer1.pvals[i] <- simes(layer2.pvals[[i]])
522
+ }
523
+
524
+ ## multiple testing adjustment for layer 1
525
+ layer1.pvals.adj <- p.adjust(layer1.pvals, 'BH')
526
+ layer1.nonnull.prop <- mean(layer1.pvals.adj < thresh)
527
+
528
+ ## test layer-2 hypotheses only if layer 1 passes
529
+ layer2.pvals.adj <- layer2.pvals # start by copying unadjusted layer-2 p-values
530
+ layer2.nonnull.prop <- rep(NA, length(layer1.pvals.adj))
531
+ names(layer2.nonnull.prop) <- names(layer1.pvals.adj)
532
+ for (i in 1:length(layer1.pvals)){
533
+ if (layer1.pvals.adj[i] < thresh){ # if layer 1 passes
534
+ ## adjust for multiplicity within layer 2...
535
+ layer2.pvals.adj[[i]] <- p.adjust(layer2.pvals[[i]], 'BH')
536
+ ## ... and inflate to account for selection at layer 1
537
+ layer2.pvals.adj[[i]] <-
538
+ pmin(layer2.pvals.adj[[i]] / layer1.nonnull.prop, 1)
539
+ ## keep track of selection at layer 2 for use in layer 3
540
+ layer2.nonnull.prop[i] <- mean(layer2.pvals.adj[[i]] < thresh)
541
+ } else { # if layer 1 fails
542
+ layer2.pvals.adj[[i]] <- rep(NA_real_, length(layer2.pvals[[i]]))
543
+ names(layer2.pvals.adj[[i]]) <- names(layer2.pvals[[i]])
544
+ }
545
+ }
546
+
547
+ ## test layer-3 hypotheses only if layers 1 & 2 pass
548
+ layer3.pvals.adj <- layer3.pvals # start by copying unadjusted layer-3 p-values
549
+ for (i in 1:length(layer1.pvals.adj)){
550
+ for (j in 1:length(layer2.pvals.adj[[i]])){
551
+ ##
552
+ if (layer1.pvals.adj[i] < thresh && # if layer 1 passes...
553
+ layer2.pvals.adj[[i]][j] < thresh # ... and if layer 2 passes
554
+ ){
555
+ ## adjust for multiplicity within layer 3...
556
+ layer3.pvals.adj[[i]][[j]] <- p.adjust(layer3.pvals[[i]][[j]], 'BH')
557
+ ## ... and inflate to account for selection at layer 1
558
+ layer3.pvals.adj[[i]][[j]] <- pmin(
559
+ layer3.pvals.adj[[i]][[j]] / layer1.nonnull.prop / layer2.nonnull.prop[i],
560
+ 1
561
+ )
562
+ } else {
563
+ layer3.pvals.adj[[i]][[j]] <- rep(NA_real_, length(layer3.pvals[[i]][[j]]))
564
+ names(layer3.pvals.adj[[i]][[j]]) <- names(layer3.pvals[[i]][[j]])
565
+ }
566
+ }
567
+ }
568
+
569
+ pvals.adj <- data.table(layer1 = character(0),
570
+ layer2 = character(0),
571
+ layer3 = character(0),
572
+ p.adj = numeric(0),
573
+ est = numeric(0),
574
+ se = numeric(0)
575
+ )
576
+ for (i in 1:length(layer1.pvals.adj)){
577
+ pvals.adj <- rbind(pvals.adj,
578
+ data.table(layer1 = names(layer1.pvals.adj)[i],
579
+ layer2 = 'overall',
580
+ layer3 = 'overall',
581
+ p.adj = layer1.pvals.adj[i],
582
+ est = NA_real_,
583
+ se = NA_real_
584
+ )
585
+ )
586
+ for (j in 1:length(layer2.pvals.adj[[i]])){
587
+ pvals.adj <- rbind(pvals.adj,
588
+ data.table(layer1 = names(layer1.pvals.adj)[i],
589
+ layer2 = names(layer2.pvals.adj[[i]])[j],
590
+ layer3 = 'overall',
591
+ p.adj = layer2.pvals.adj[[i]][j],
592
+ est = NA_real_,
593
+ se = NA_real_
594
+ )
595
+ )
596
+ for (k in 1:length(layer3.pvals.adj[[i]][[j]])){
597
+ pvals.adj <- rbind(pvals.adj,
598
+ data.table(layer1 = names(layer1.pvals.adj)[i],
599
+ layer2 = names(layer2.pvals.adj[[i]])[j],
600
+ layer3 = names(layer3.pvals.adj[[i]][[j]])[k],
601
+ p.adj = layer3.pvals.adj[[i]][[j]][k],
602
+ est = layer3.ests[[i]][[j]][k],
603
+ se = layer3.ses[[i]][[j]][k]
604
+ )
605
+ )
606
+ }
607
+ }
608
+ }
609
+
610
+ ## write out
611
+ fwrite(pvals.adj, '../results/padj_basecontrol_may2024.csv')
612
+
613
+
614
+ ## prettify for reading
615
+ pvals.adj.pretty <- pvals.adj
616
+ colnames(pvals.adj.pretty) <- gsub('layer1',
617
+ 'layer1_hypothesisfamily',
618
+ colnames(pvals.adj.pretty)
619
+ )
620
+ colnames(pvals.adj.pretty) <- gsub('layer2',
621
+ 'layer2_treatmentcontrast',
622
+ colnames(pvals.adj.pretty)
623
+ )
624
+ colnames(pvals.adj.pretty) <- gsub('layer3',
625
+ 'layer3_specificoutcome',
626
+ colnames(pvals.adj.pretty)
627
+ )
628
+
629
+ pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
630
+ 'attitude\\.(pro|anti|neutral)(:assg\\.(inc|cons))?:recsys.(ca|cp|ip|ia)',
631
+ '\\1 \\3 \\4',
632
+ layer2_treatmentcontrast
633
+ )]
634
+ pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
635
+ '.vs.',
636
+ ' - ',
637
+ layer2_treatmentcontrast,
638
+ fixed = TRUE
639
+ )]
640
+ pvals.adj.pretty[, layer2_treatmentcontrast := gsub(
641
+ ' +',
642
+ ' ',
643
+ layer2_treatmentcontrast
644
+ )]
645
+ fwrite(pvals.adj.pretty,
646
+ '../results/padj_basecontrol_pretty_ytrecs_may2024.csv'
647
+ )
648
+
649
+ # pvals.adj.pretty[p.adj < .05 & layer3_specificoutcome != 'overall',]
650
+
651
+ ################################
652
+ ######### OMNIBUS TEST #########
653
+ ################################
654
+
655
+ # Step 1: Create a binary variable indicating increasing condition
656
+ d$is_increasing <- ifelse(d$treatment_arm == "pi" | d$treatment_arm == "ai", 1, 0)
657
+
658
+ # Step 2: Reverse values for individuals in the Pro condition
659
+ d$mw_index_pre[d$treatment_arm %like% "pi|pc"] <- 1 - d$mw_index_pre[d$treatment_arm %like% "pi|pc"]
660
+ d$mw_index[d$treatment_arm %like% "pi|pc"] <- 1 - d$mw_index[d$treatment_arm %like% "pi|pc"]
661
+
662
+ # Step 3: Perform the linear regression (omnibus test)
663
+ model <- lm(I(mw_index - mw_index_pre) ~ is_increasing, data = d)
664
+
665
+ # View the summary of the model
666
+ summary(model)
667
+ rm(list = ls())
code/shorts/07_postprocessing_exploration.R ADDED
@@ -0,0 +1,622 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cat(rep('=', 80),
2
+ '\n\n',
3
+ 'OUTPUT FROM: shorts/07_postprocessing_exploration.R',
4
+ '\n\n',
5
+ sep = ''
6
+ )
7
+
8
+ ## Extremizing Sequences and Minimum Wage Opinions
9
+ ## Data collected May 2024 via MTurk/CloudResearch
10
+ ## Analysis for the Extremizing Sequences Experiment
11
+
12
+ ## Preamble ----------------------------
13
+ library(tidyverse)
14
+ library(janitor)
15
+ library(lubridate)
16
+ library(stargazer)
17
+ library(broom)
18
+ library(psych)
19
+
20
+ w12 <- read_csv("../results/intermediate data/shorts/qualtrics_w12_clean_ytrecs_may2024.csv")
21
+
22
+ ## SAMPLE SIZE AND CRONBACH'S ALPHA ------------------
23
+
24
+ # SAMPLE SIZE
25
+ w12 %>%
26
+ filter(!is.na(treatment_arm)) %>%
27
+ count() %>%
28
+ as.integer() %>%
29
+ format(big.mark = ',')
30
+
31
+ # CRONBACH'S ALPHA ON POLICY INDEX
32
+ w12 %>%
33
+ select(minwage15_pre,
34
+ rtwa_v1_pre,
35
+ rtwa_v2_pre,
36
+ mw_support_pre,
37
+ minwage_howhigh_pre,
38
+ mw_help_pre_1,
39
+ mw_restrict_pre_1,
40
+ minwage_text_r_pre
41
+ ) %>%
42
+ alpha() %>%
43
+ `[[`('total') %>%
44
+ `[`('raw_alpha') %>%
45
+ as.numeric() %>%
46
+ format(digits = 2, nsmall = 2) %>%
47
+ paste0('%') %>% # trailing comment char to prevent latex import issue
48
+ writeLines('../results/alpha_study4.txt')
49
+
50
+
51
+ # FACTOR ANALYSIS WITH VARIMAX ROTATION (PRE)
52
+ pca2 <- psych::principal(select(w12, minwage15_pre, rtwa_v1_pre,
53
+ rtwa_v2_pre, mw_support_pre, minwage_howhigh_pre,
54
+ mw_help_pre_1, mw_restrict_pre_1, minwage_text_r_pre),
55
+ rotate="varimax",
56
+ nfactors=1
57
+ )
58
+ pc2 <- pca2$Vaccounted[2]
59
+ writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study4_pre.tex",sep = "%")
60
+
61
+
62
+ # FACTOR ANALYSIS WITH VARIMAX ROTATION (POST)
63
+ pca2 <- psych::principal(
64
+ select(w12, minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh, mw_help_1,
65
+ mw_restrict_1, minwage_text_r),
66
+ rotate="varimax",
67
+ nfactors=1
68
+ )
69
+ pc2 <- pca2$Vaccounted[2]
70
+ writeLines(as.character(round(pc2, 2)),con = "../results/outcomes_pc2_study4_post.tex",sep = "%")
71
+
72
+
73
+ ## BASIC DESCRIPTIVE FIGURES ------------------
74
+
75
+ ## TIME SPENT DURING THE SURVEY
76
+ (surveytime_plot <- ggplot(w12) +
77
+ geom_histogram(aes(x=survey_time,y=..density../sum(..density..))) +
78
+ scale_x_continuous("Overall survey time taken (minutes)",
79
+ breaks=seq(0,100,10),
80
+ limits=c(-1,100)
81
+ ) +
82
+ scale_y_continuous("Density") +
83
+ geom_vline(xintercept = mean(w12$survey_time,na.rm=T),lty=3,col="red") +
84
+ annotate(x=mean(w12$survey_time+1,na.rm=T),y=0.13,geom = "text",
85
+ label=paste0("Average: ",round(mean(w12$survey_time,na.rm=T),0)," minutes"),hjust=0) +
86
+ geom_vline(xintercept = median(w12$survey_time,na.rm=T),lty=2,col="red") +
87
+ annotate(x=median(w12$survey_time+1,na.rm=T),y=0.16,geom = "text",
88
+ label=paste0("Median: ",round(median(w12$survey_time,na.rm=T),0)," minutes"),hjust=0) +
89
+ theme_minimal()
90
+ )
91
+
92
+ ## TIME SPENT ON THE INTERFACE
93
+ (ytrecstime_plot <- ggplot(w12) +
94
+ geom_histogram(aes(x=interface_duration/60,y=..density../sum(..density..))) +
95
+ scale_x_continuous("Interface Time Taken (minutes)",
96
+ breaks=seq(0,80,10),
97
+ limits=c(-1,70)) +
98
+ scale_y_continuous("Density") +
99
+ geom_vline(xintercept = mean(w12$interface_duration/60,na.rm=T),lty=3,col="red") +
100
+ annotate(x=mean(w12$interface_duration/60+1,na.rm=T),y=0.1,geom = "text",
101
+ label=paste0("Average: ",round(mean(w12$interface_duration/60,na.rm=T),0)," minutes"),hjust=0) +
102
+ geom_vline(xintercept = median(w12$interface_duration/60,na.rm=T),lty=2,col="red") +
103
+ annotate(x=median(w12$interface_duration/60+1,na.rm=T),y=0.13,geom = "text",
104
+ label=paste0("Median: ",round(median(w12$interface_duration/60,na.rm=T),0)," minutes"),hjust=0) +
105
+ theme_minimal()
106
+ )
107
+
108
+ ## PRE OPINIONS OVERALL
109
+ (hist_mwindex <- ggplot(w12) +
110
+ geom_histogram(aes(x=mw_index_pre)) +
111
+ scale_x_continuous("Minimum Wage Opinions Index, Pre") +
112
+ scale_y_continuous("Count",limits=c(-5,200)) +
113
+ annotate(x = 0.92,y=-3,geom = "text",label="More conservative\nopinions",col="red",hjust=1,size=3,lineheight=0.75) +
114
+ annotate(x = 0.98,xend=1,y=-3,yend=-3,geom = "segment",arrow=arrow(type = "closed",angle = 20),col="red") +
115
+ annotate(x = 0.08,y=-3,geom = "text",label="More liberal\nopinions",col="blue",hjust=0,size=3,lineheight=0.75) +
116
+ annotate(x = 0.02,xend=0.00,y=-3,yend=-3,geom = "segment",arrow=arrow(type = "closed",angle = 20),col="blue") +
117
+ theme_minimal()
118
+ )
119
+
120
+ ## PRE OPINION BY TERCILE
121
+ (hist_mwindex_thirds <- ggplot(w12,aes(x=mw_index_pre)) +
122
+ geom_histogram(data=filter(w12,thirds==1),aes(x=mw_index_pre),fill="blue") +
123
+ geom_histogram(data=filter(w12,thirds==2),aes(x=mw_index_pre),fill="grey") +
124
+ geom_histogram(data=filter(w12,thirds==3),aes(x=mw_index_pre),fill="red") +
125
+ scale_x_continuous("Minimum Wage Opinions Index, Pre") +
126
+ scale_y_continuous("Count",limits=c(-5,200)) +
127
+ annotate(x = 0.92,y=-5,geom = "text",label="More conservative\nopinions",col="red",hjust=1,size=3,lineheight=0.75) +
128
+ annotate(x = 0.98,xend=1,y=-5,yend=-5,geom = "segment",arrow=arrow(type = "closed",angle = 20),col="red") +
129
+ annotate(x = 0.08,y=-5,geom = "text",label="More liberal\nopinions",col="blue",hjust=0,size=3,lineheight=0.75) +
130
+ annotate(x = 0.02,xend=0.00,y=-5,yend=-5,geom = "segment",arrow=arrow(type = "closed",angle = 20),col="blue") +
131
+ theme_minimal()
132
+ )
133
+
134
+ (hist_mwindex_thirds_nocolor <- ggplot(w12,aes(x=mw_index_pre)) +
135
+ geom_histogram(data=filter(w12,thirds==1),aes(x=mw_index_pre),fill="grey") +
136
+ geom_histogram(data=filter(w12,thirds==2),aes(x=mw_index_pre),fill="grey") +
137
+ geom_histogram(data=filter(w12,thirds==3),aes(x=mw_index_pre),fill="grey") +
138
+ scale_x_continuous("Minimum Wage Opinions Index, W1") +
139
+ scale_y_continuous("Count",limits=c(-5,200)) +
140
+ annotate(x = 0.92,y=-5,geom = "text",label="More conservative\nopinions",col="red",hjust=1,size=3,lineheight=0.75) +
141
+ annotate(x = 0.98,xend=1,y=-5,yend=-5,geom = "segment",arrow=arrow(type = "closed",angle = 20),col="red") +
142
+ annotate(x = 0.08,y=-5,geom = "text",label="More liberal\nopinions",col="blue",hjust=0,size=3,lineheight=0.75) +
143
+ annotate(x = 0.02,xend=0.00,y=-5,yend=-5,geom = "segment",arrow=arrow(type = "closed",angle = 20),col="blue") +
144
+ theme_minimal()
145
+ )
146
+
147
+ # SUMMARY PRE OPINIONS FOR EACH CONDITION
148
+ groupsumm_bythirds <- w12 %>%
149
+ group_by(treatment_arm,thirds) %>%
150
+ summarize(n = n()) %>%
151
+ na.omit() %>%
152
+ mutate(treatment_arm = factor(treatment_arm,levels=c("pc", "pi","ac" , "ai"),
153
+ labels = c("Liberal\nconstant",
154
+ "Liberal\nincreasing",
155
+ "Conservative\nconstant",
156
+ "Conservative\nincreasing"),ordered=T),
157
+ thirds = factor(thirds,levels=c(1,2,3),ordered=T))
158
+
159
+ groupsumm <- w12 %>%
160
+ group_by(treatment_arm) %>%
161
+ summarize(
162
+ minwage15 = mean(minwage15_pre,na.rm=T),
163
+ rtwa_v1 = mean(rtwa_v1_pre, na.rm = T),
164
+ rtwa_v2 = mean(rtwa_v2_pre, na.rm = T),
165
+ mw_support = mean(mw_support_pre,na.rm = T),
166
+ minwage_howhigh = mean(minwage_howhigh_pre, na.rm = T),
167
+ mw_help_1 = mean(mw_help_pre_1, na.rm = T),
168
+ mw_restrict_1 = mean(mw_restrict_pre_1,na.rm = T),
169
+ minwage_text_r = mean(minwage_text_r_pre,na.rm = T),
170
+ mw_index_pre = mean(mw_index_pre,na.rm = T),
171
+ n = n()) %>%
172
+ na.omit() %>%
173
+ mutate(treatment_arm = factor(treatment_arm,levels=c("pc",
174
+ "pi",
175
+ "ac" ,
176
+ "ai"),
177
+ labels = c("Liberal\nconstant",
178
+ "Liberal\nincreasing",
179
+ "Conservative\nconstant",
180
+ "Conservative\nincreasing"),ordered=T))
181
+
182
+ # N IN EACH TREATMENT CONDITION
183
+ (plot_hist_n <- ggplot(groupsumm) +
184
+ geom_bar(aes(x=treatment_arm,y=n),stat="identity") +
185
+ geom_text(aes(x=treatment_arm,y=n+15,label=n),stat="identity") +
186
+ scale_x_discrete("Treatment Condition") +
187
+ scale_y_continuous("N") +
188
+ theme_minimal()
189
+ )
190
+
191
+ ## N IN EACH TREATMENT CONDITION COLORED BY THIRDS
192
+ (plot_hist_n_bythirds <- ggplot(groupsumm_bythirds) +
193
+ geom_bar(aes(x=treatment_arm,y=n,fill=thirds),stat="identity") +
194
+ geom_text(data=groupsumm,aes(x=treatment_arm,y=n+15,label=n),stat="identity") +
195
+ scale_x_discrete("Treatment Condition") +
196
+ scale_y_continuous("N") +
197
+ scale_fill_manual("Tercile of\nPre-Opinion",breaks=c(1,2,3),values=c("blue","grey","red")) +
198
+ theme_minimal()
199
+ )
200
+
201
+ ## AVERAGE PRE-OPINION ON MINIMUM WAGE INDEX
202
+ (plot_hist_mwindex <- ggplot(groupsumm) +
203
+ geom_bar(aes(x=treatment_arm,y=mw_index_pre),stat="identity") +
204
+ scale_x_discrete("Treatment Condition") +
205
+ scale_y_continuous("Average Pre-Opinion\non Minimum Wage Index",
206
+ limits=c(0,0.6),
207
+ breaks = seq(0,0.6,0.2),
208
+ labels=c("\n0.0\nMore\nliberal\nopinions","0.2","0.4","More\nconservative\nopinions\n0.6\n\n\n")) +
209
+ theme_minimal() +
210
+ theme(plot.margin = unit(c(1.75,0.5,0.5,0.5),"lines"))
211
+ )
212
+
213
+ # SUMMARY FOR EACH CONDITION
214
+ groupsumm <- w12 %>%
215
+ group_by(treatment_arm) %>%
216
+ summarize(
217
+ minwage15 = mean(minwage15,na.rm=T),
218
+ rtwa_v1 = mean(rtwa_v1, na.rm = T),
219
+ rtwa_v2 = mean(rtwa_v2, na.rm = T),
220
+ mw_support = mean(mw_support,na.rm = T),
221
+ minwage_howhigh = mean(minwage_howhigh, na.rm = T),
222
+ mw_help_1 = mean(mw_help_1, na.rm = T),
223
+ mw_restrict_1 = mean(mw_restrict_1,na.rm = T),
224
+ minwage_text_r = mean(minwage_text_r,na.rm = T),
225
+ mw_index = mean(mw_index,na.rm = T),
226
+ n = n()) %>%
227
+ na.omit() %>%
228
+ mutate(treatment_arm = factor(treatment_arm,levels=c("pc",
229
+ "pi",
230
+ "ac" ,
231
+ "ai"),
232
+ labels = c("Liberal\nconstant",
233
+ "Liberal\nincreasing",
234
+ "Conservative\nconstant",
235
+ "Conservative\nincreasing"),
236
+ ordered=T))
237
+
238
+ (plot_hist_mwindex <- ggplot(groupsumm) +
239
+ geom_bar(aes(x=treatment_arm,y=mw_index),stat="identity") +
240
+ scale_x_discrete("Treatment Condition") +
241
+ scale_y_continuous("Average Post-Opinion\non Minimum Wage Index",
242
+ limits=c(0,0.6),
243
+ breaks = seq(0,0.6,0.2),
244
+ labels=c("\n0.0\nMore\nliberal\nopinions","0.2","0.4","More\nconservative\nopinions\n0.6\n\n\n")) +
245
+ theme_minimal() +
246
+ theme(plot.margin = unit(c(1.75,0.5,0.5,0.5),"lines"))
247
+ )
248
+
249
+ ## CHANGES IN OPINION BETWEEN WAVES
250
+ treatsumm <- w12 %>%
251
+ group_by(treatment_arm) %>%
252
+ summarize(minwage15 = mean(minwage15-minwage15_pre,na.rm=T),
253
+ rtwa_v1 = mean(rtwa_v1-rtwa_v1_pre, na.rm = T),
254
+ rtwa_v2 = mean(rtwa_v2-rtwa_v2_pre, na.rm = T),
255
+ mw_support = mean(mw_support-mw_support_pre,na.rm = T),
256
+ minwage_howhigh = mean(minwage_howhigh-minwage_howhigh_pre, na.rm = T),
257
+ mw_help_1 = mean(mw_help_1-mw_help_pre_1, na.rm = T),
258
+ mw_restrict_1 = mean(mw_restrict_1-mw_restrict_pre_1,na.rm = T),
259
+ minwage_text_r = mean(minwage_text_r-minwage_text_r_pre,na.rm = T),
260
+ mw_index_change = mean(mw_index - mw_index_pre,na.rm = T),
261
+ n = n()) %>%
262
+ na.omit() %>%
263
+ mutate(treatment_arm = factor(treatment_arm,levels=c("pc",
264
+ "pi",
265
+ "ac" ,
266
+ "ai"),
267
+ labels = c("Liberal\nconstant",
268
+ "Liberal\nincreasing",
269
+ "Conservative\nconstant",
270
+ "Conservative\nincreasing"),
271
+ ordered=T))
272
+
273
+ w1w2_corrplot <- corrplot::corrplot(cor(select(w12,
274
+ minwage15_pre, rtwa_v1_pre, rtwa_v2_pre, mw_support_pre,
275
+ minwage_howhigh_pre, mw_help_pre_1, mw_restrict_pre_1, minwage_text_r_pre,
276
+ minwage15, rtwa_v1, rtwa_v2, mw_support, minwage_howhigh,
277
+ mw_help_1, mw_restrict_1, minwage_text_r), use = "complete.obs")[1:8,9:16],method = "shade")
278
+ dev.off()
279
+
280
+ ## AVERAGE OPINION CHANGE POST-PRE ON MIN WAGE POLICY INDEX
281
+ (plot_hist_mwindex <- ggplot(treatsumm) +
282
+ geom_bar(aes(x=treatment_arm,y=mw_index_change),stat="identity") +
283
+ scale_x_discrete("Treatment Condition") +
284
+ scale_y_continuous("Average Opinion Change Post-Pre\non Min. Wage Policy Index",
285
+ limits=c(-0.2,0.2),
286
+ breaks = seq(-0.2,0.2,0.1),
287
+ labels=c("\n\n\n-0.2\nLiberal\nopinion\nchange","-0.1","0.00","0.1","Conservative\nopinion\nchange\n0.2\n\n\n")
288
+ ) +
289
+ theme_minimal() +
290
+ theme(plot.margin = unit(c(1.75,0.5,0.5,0.5),"lines"))
291
+ )
292
+
293
+ ### CHANGE FOR MODERATES
294
+ treatsumm_thirds <- w12 %>%
295
+ group_by(thirds, treatment_arm) %>%
296
+ summarize(minwage15 = mean(minwage15-minwage15_pre,na.rm=T),
297
+ rtwa_v1 = mean(rtwa_v1-rtwa_v1_pre, na.rm = T),
298
+ rtwa_v2 = mean(rtwa_v2-rtwa_v2_pre, na.rm = T),
299
+ mw_support = mean(mw_support-mw_support_pre,na.rm = T),
300
+ minwage_howhigh = mean(minwage_howhigh-minwage_howhigh_pre, na.rm = T),
301
+ mw_help_1 = mean(mw_help_1-mw_help_pre_1, na.rm = T),
302
+ mw_restrict_1 = mean(mw_restrict_1-mw_restrict_pre_1,na.rm = T),
303
+ minwage_text_r = mean(minwage_text_r-minwage_text_r_pre,na.rm = T),
304
+ mw_index_change = mean(mw_index - mw_index_pre,na.rm = T),
305
+ n = n()) %>%
306
+ na.omit() %>%
307
+ mutate(treatment_arm = factor(treatment_arm,levels=c("pc",
308
+ "pi",
309
+ "ac" ,
310
+ "ai"),
311
+ labels = c("Liberal\nconstant",
312
+ "Liberal\nincreasing",
313
+ "Conservative\nconstant",
314
+ "Conservative\nincreasing"),
315
+ ordered=T))
316
+
317
+ (plot_hist_mwindex_thirds <- ggplot(treatsumm_thirds %>% filter(thirds == 2)) +
318
+ geom_bar(aes(x=treatment_arm,y=mw_index_change),stat="identity") +
319
+ scale_x_discrete("Treatment Condition") +
320
+ scale_y_continuous("Average Opinion Change Post-Pre\non Min. Wage Policy Index\nfor Moderates",
321
+ limits=c(-0.2,0.2),
322
+ breaks = seq(-0.2,0.2,0.1),
323
+ labels=c("\n\n\n-0.2\nLiberal\nopinion\nchange","-0.1","0.00","0.1","Conservative\nopinion\nchange\n0.2\n\n\n")
324
+ ) +
325
+ theme_minimal() +
326
+ theme(plot.margin = unit(c(1.75,0.5,0.5,0.5),"lines"))
327
+ )
328
+
329
+
330
+ ## BASE CONTROL FIGURES --------------------------------------
331
+
332
+ ##
333
+ ## RUN 04_analysis_multipletesting_basecontrol_may2024.R, THEN READ IN ADJUSTED P-VALUES
334
+ ##
335
+
336
+ coefs_basecontrol <- read_csv("../results/padj_basecontrol_pretty_ytrecs_may2024.csv")
337
+
338
+ outcome_labels <- data.frame(outcome = c("Minimum wage\nindex"),
339
+ specificoutcome = c("mw_index"),
340
+ family = c(rep("Policy Attitudes\n(unit scale, + is more conservative)",1)))
341
+
342
+
343
+ #### THE effect of INCREASING vs. CONSTANT assignment among LIBERAL participants ####
344
+ coefs_third1_basecontrol <- coefs_basecontrol %>%
345
+ filter(layer2_treatmentcontrast == "attitude.pro:recsys.pi - attitude.pro:recsys.pc" &
346
+ layer3_specificoutcome != "overall")
347
+
348
+
349
+ coefs_third1_basecontrol$outcome = outcome_labels$outcome[match(coefs_third1_basecontrol$layer3_specificoutcome,
350
+ outcome_labels$specificoutcome)]
351
+
352
+ coefs_third1_basecontrol$family = outcome_labels$family[match(coefs_third1_basecontrol$layer3_specificoutcome,outcome_labels$specificoutcome)]
353
+
354
+ coefs_third1_basecontrol <- mutate(coefs_third1_basecontrol,
355
+ family = factor(family,
356
+ levels = c("Policy Attitudes\n(unit scale, + is more conservative)"
357
+ ),ordered = T))
358
+
359
+ coefs_third1_basecontrol <- coefs_third1_basecontrol %>%
360
+ mutate(ci_lo_99 = est + qnorm(0.001)*se,
361
+ ci_hi_99 = est + qnorm(0.995)*se,
362
+ ci_lo_95 = est + qnorm(0.025)*se,
363
+ ci_hi_95 = est + qnorm(0.975)*se,
364
+ ci_lo_90 = est + qnorm(0.05)*se,
365
+ ci_hi_90 = est + qnorm(0.95)*se,
366
+ plotorder = nrow(coefs_third1_basecontrol):1
367
+ )
368
+
369
+ writeLines(as.character(round(100*abs(filter(coefs_third1_basecontrol,layer3_specificoutcome=="pro_fraction_chosen")$est),0)),
370
+ con = "../results/beta_recsys_pro_fraction_chosen_third1.tex",sep="%")
371
+
372
+
373
+ #### THE effect of INCREASING vs. CONSTANT assignment among LIBERAL participants ####
374
+ (coefplot_third1_basecontrol <- ggplot(filter(coefs_third1_basecontrol),aes(y=plotorder)) +
375
+ geom_errorbarh(aes(xmin=ci_lo_95,xmax=ci_hi_95),height=0,lwd=0.5) +
376
+ geom_errorbarh(aes(xmin=ci_lo_90,xmax=ci_hi_90),height=0,lwd=1) +
377
+ geom_point(aes(x=est),size=1.5) +
378
+ geom_vline(xintercept = 0,lty=2) +
379
+ facet_wrap(~family,ncol=1,scales="free") +
380
+ scale_y_continuous("",
381
+ breaks = coefs_third1_basecontrol$plotorder,
382
+ labels = coefs_third1_basecontrol$outcome) +
383
+ scale_x_continuous("Increasing Liberal seed vs. Constant Liberal seed assignment \namong Liberal participants \n(95% and 90% CIs)") +
384
+ coord_cartesian(xlim=c(-0.2,0.2)) +
385
+ theme_bw(base_family = "sans") +
386
+ theme(strip.background = element_rect(fill="white"))
387
+ )
388
+
389
+ #### THE effect of INCREASING vs. CONSTANT assignment among CONSERVATIVE participants ####
390
+ coefs_third3_basecontrol <- coefs_basecontrol %>%
391
+ filter(layer2_treatmentcontrast == "attitude.anti:recsys.ai - attitude.anti:recsys.ac" &
392
+ layer3_specificoutcome != "overall")
393
+
394
+ coefs_third3_basecontrol$outcome = outcome_labels$outcome[match(coefs_third3_basecontrol$layer3_specificoutcome,
395
+ outcome_labels$specificoutcome)]
396
+
397
+ coefs_third3_basecontrol$family = outcome_labels$family[match(coefs_third3_basecontrol$layer3_specificoutcome,
398
+ outcome_labels$specificoutcome)]
399
+
400
+ coefs_third3_basecontrol <- mutate(coefs_third3_basecontrol,
401
+ family = factor(family,levels = c("Policy Attitudes\n(unit scale, + is more conservative)"
402
+ ),ordered = T))
403
+
404
+
405
+ coefs_third3_basecontrol <- coefs_third3_basecontrol %>%
406
+ mutate(ci_lo_99 = est + qnorm(0.001)*se,
407
+ ci_hi_99 = est + qnorm(0.995)*se,
408
+ ci_lo_95 = est + qnorm(0.025)*se,
409
+ ci_hi_95 = est + qnorm(0.975)*se,
410
+ ci_lo_90 = est + qnorm(0.05)*se,
411
+ ci_hi_90 = est + qnorm(0.95)*se,
412
+ plotorder = nrow(coefs_third3_basecontrol):1
413
+ )
414
+
415
+ writeLines(as.character(round(100*abs(filter(coefs_third3_basecontrol,layer3_specificoutcome=="pro_fraction_chosen")$est),0)),con = "../results/beta_recsys_pro_fraction_chosen_third3.tex",sep="%")
416
+ writeLines(as.character(round(abs(filter(coefs_third3_basecontrol,layer3_specificoutcome=="mw_index_w2")$est),2)),con = "../results/beta_recsys_mwindex_third3.tex",sep="%")
417
+ writeLines(as.character(round(abs(filter(coefs_third3_basecontrol,layer3_specificoutcome=="mw_index_w2")$ci_hi_95),2)),con = "../results/cihi_recsys_mwindex_third3.tex",sep="%")
418
+
419
+
420
+ #### THE effect of INCREASING vs. CONSTANT assignment among CONSERVATIVE participants ####
421
+ (coefplot_third3_basecontrol <- ggplot(filter(coefs_third3_basecontrol),aes(y=plotorder)) +
422
+ geom_errorbarh(aes(xmin=ci_lo_95,xmax=ci_hi_95),height=0,lwd=0.5) +
423
+ geom_errorbarh(aes(xmin=ci_lo_90,xmax=ci_hi_90),height=0,lwd=1) +
424
+ geom_point(aes(x=est),size=1.5) +
425
+ geom_vline(xintercept = 0,lty=2) +
426
+ facet_wrap(~family,ncol=1,scales="free") +
427
+ scale_y_continuous("",
428
+ breaks = coefs_third3_basecontrol$plotorder,labels = coefs_third3_basecontrol$outcome) +
429
+ scale_x_continuous("Increasing Conservative vs. Constant Conservative \n seed among Conservative participants \n(95% and 90% CIs)") +
430
+ coord_cartesian(xlim=c(-0.2,0.2)) +
431
+ theme_bw(base_family = "sans") +
432
+ theme(strip.background = element_rect(fill="white"))
433
+ )
434
+
435
#### THE effect of INCREASING vs. CONSTANT assignment among MODERATE participants assigned to a LIBERAL sequence ####
coefs_third2_pro_basecontrol <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.pi - attitude.neutral:recsys.pc" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_third2_pro_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_pro_basecontrol$layer3_specificoutcome,
                                                                    outcome_labels$specificoutcome)]

coefs_third2_pro_basecontrol$family = outcome_labels$family[match(coefs_third2_pro_basecontrol$layer3_specificoutcome,
                                                                  outcome_labels$specificoutcome)]

coefs_third2_pro_basecontrol <- mutate(coefs_third2_pro_basecontrol,
                                       family = factor(family,
                                                       levels = c("Policy Attitudes\n(unit scale, + is more conservative)"),
                                                       ordered = T))

# Normal-approximation CIs around each estimate.
# BUGFIX: the 99% lower bound previously used qnorm(0.001); a two-sided 99% CI
# pairs qnorm(0.005) with qnorm(0.995). (The 99% columns are not used in the
# plots below, so figure output is unchanged.)
coefs_third2_pro_basecontrol <- coefs_third2_pro_basecontrol %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_third2_pro_basecontrol):1
  )
# Export scalar quantities for inline use in the manuscript
# (the trailing "%" swallows the newline when \input in TeX).
writeLines(as.character(round(100*abs(filter(coefs_third2_pro_basecontrol,layer3_specificoutcome=="pro_fraction_chosen")$est),0)),con = "../results/beta_recsys_pro_fraction_chosen_third2_proseed.tex",sep="%")
writeLines(as.character(abs(round(filter(coefs_third2_pro_basecontrol,layer3_specificoutcome=="platform_duration")$est,2))),con = "../results/beta_recsys_duration_third2_proseed.tex",sep="%")
writeLines(as.character(abs(round(filter(coefs_third2_pro_basecontrol,layer3_specificoutcome=="platform_duration")$est*60,1))),con = "../results/beta_minutes_recsys_duration_third2_proseed.tex",sep="%")
463
+
464
#### THE effect of INCREASING vs. CONSTANT assignment among MODERATE participants assigned to a LIBERAL sequence ####
# (A no-op filter() wrapper around the data was removed.)
(coefplot_third2_pro_basecontrol <- ggplot(coefs_third2_pro_basecontrol, aes(y = plotorder)) +
   geom_errorbarh(aes(xmin = ci_lo_95, xmax = ci_hi_95), height = 0, lwd = 0.5) +
   geom_errorbarh(aes(xmin = ci_lo_90, xmax = ci_hi_90), height = 0, lwd = 1) +
   geom_point(aes(x = est), size = 1.5) +
   geom_vline(xintercept = 0, lty = 2) +
   facet_wrap(~family, ncol = 1, scales = "free") +
   scale_y_continuous("",
                      breaks = coefs_third2_pro_basecontrol$plotorder,
                      labels = coefs_third2_pro_basecontrol$outcome) +
   scale_x_continuous("Increasing Liberal vs. Constant Liberal seed among Moderates \n(95% and 90% CIs)") +
   coord_cartesian(xlim = c(-0.2, 0.2)) +
   theme_bw(base_family = "sans") +
   theme(strip.background = element_rect(fill = "white"))
)
ggsave(coefplot_third2_pro_basecontrol,
       filename = "../results/coefplot_third2_pro_basecontrol.png", width = 5, height = 8)
480
+
481
#### THE effect of INCREASING vs. CONSTANT assignment among MODERATE participants assigned to a CONSERVATIVE sequence ####
coefs_third2_anti_basecontrol <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.ai - attitude.neutral:recsys.ac" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_third2_anti_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_anti_basecontrol$layer3_specificoutcome,
                                                                     outcome_labels$specificoutcome)]

coefs_third2_anti_basecontrol$family = outcome_labels$family[match(coefs_third2_anti_basecontrol$layer3_specificoutcome,
                                                                   outcome_labels$specificoutcome)]

coefs_third2_anti_basecontrol <- mutate(coefs_third2_anti_basecontrol,
                                        family = factor(family,
                                                        levels = c("Policy Attitudes\n(unit scale, + is more conservative)"),
                                                        ordered = T))

# Normal-approximation CIs around each estimate.
# BUGFIX: the 99% lower bound previously used qnorm(0.001); a two-sided 99% CI
# pairs qnorm(0.005) with qnorm(0.995). (The 99% columns are unused downstream.)
coefs_third2_anti_basecontrol <- coefs_third2_anti_basecontrol %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_third2_anti_basecontrol):1
  )

# Export scalar quantities for inline use in the manuscript (95% bounds here use 0.025/0.975 as before).
writeLines(as.character(round(100*abs(filter(coefs_third2_anti_basecontrol,layer3_specificoutcome=="pro_fraction_chosen")$est),0)),con = "../results/beta_recsys_pro_fraction_chosen_third2_antiseed.tex",sep="%")
writeLines(as.character(round(filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="gunpolicy")$est,2)),con = "../results/beta_recsys_mwindex_third2_antiseed.tex",sep="%")
writeLines(as.character(round(filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="gunpolicy")$est + qnorm(0.975)*filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="gunpolicy")$se,2)),con = "../results/cihi_recsys_mwindex_third2_antiseed.tex",sep="%")
writeLines(as.character(round(filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="gunpolicy")$est + qnorm(0.025)*filter(coefs_third2_anti_basecontrol,layer1_hypothesisfamily=="gunpolicy")$se,2)),con = "../results/cilo_recsys_mwindex_third2_antiseed.tex",sep="%")
512
+
513
#### THE effect of INCREASING vs. CONSTANT assignment among MODERATE participants assigned to a CONSERVATIVE sequence ####
# (A no-op filter() wrapper around the data was removed.)
(coefplot_third2_anti_basecontrol <- ggplot(coefs_third2_anti_basecontrol, aes(y = plotorder)) +
   geom_errorbarh(aes(xmin = ci_lo_95, xmax = ci_hi_95), height = 0, lwd = 0.5) +
   geom_errorbarh(aes(xmin = ci_lo_90, xmax = ci_hi_90), height = 0, lwd = 1) +
   geom_point(aes(x = est), size = 1.5) +
   geom_vline(xintercept = 0, lty = 2) +
   facet_wrap(~family, ncol = 1, scales = "free") +
   scale_y_continuous("",
                      breaks = coefs_third2_anti_basecontrol$plotorder,
                      labels = coefs_third2_anti_basecontrol$outcome) +
   scale_x_continuous("Increasing Conservative vs. Constant Conservative seed \namong Moderates \n(95% and 90% CIs)") +
   coord_cartesian(xlim = c(-0.2, 0.2)) +
   theme_bw(base_family = "sans") +
   theme(strip.background = element_rect(fill = "white"))
)
ggsave(coefplot_third2_anti_basecontrol,
       filename = "../results/coefplot_third2_anti_basecontrol.png", width = 5, height = 8)
529
+
530
+
531
#### THE effect of CONSERVATIVE vs. LIBERAL assignment among MODERATE participants assigned to an INCREASING sequence ####
coefs_third2_31_basecontrol <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.ai - attitude.neutral:recsys.pi" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_third2_31_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_31_basecontrol$layer3_specificoutcome,
                                                                   outcome_labels$specificoutcome)]

coefs_third2_31_basecontrol$family = outcome_labels$family[match(coefs_third2_31_basecontrol$layer3_specificoutcome,
                                                                 outcome_labels$specificoutcome)]

coefs_third2_31_basecontrol <- mutate(coefs_third2_31_basecontrol,
                                      family = factor(family,
                                                      levels = c("Policy Attitudes\n(unit scale, + is more conservative)"),
                                                      ordered = T))

# Normal-approximation CIs around each estimate.
# BUGFIX: the 99% lower bound previously used qnorm(0.001); a two-sided 99% CI
# pairs qnorm(0.005) with qnorm(0.995). (The 99% columns are unused downstream.)
coefs_third2_31_basecontrol <- coefs_third2_31_basecontrol %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_third2_31_basecontrol):1
  )
# Export scalar quantity for inline use in the manuscript.
writeLines(as.character(round(100*abs(filter(coefs_third2_31_basecontrol,layer3_specificoutcome=="pro_fraction_chosen")$est),0)),con = "../results/beta_seed_pro_fraction_chosen_third2_31.tex",sep="%")
558
+
559
+
560
#### THE effect of CONSERVATIVE vs. LIBERAL assignment among MODERATE participants assigned to an INCREASING sequence ####
# (A no-op filter() wrapper around the data was removed.)
(coefplot_third2_31_basecontrol <- ggplot(coefs_third2_31_basecontrol, aes(y = plotorder)) +
   geom_errorbarh(aes(xmin = ci_lo_95, xmax = ci_hi_95), height = 0, lwd = 0.5) +
   geom_errorbarh(aes(xmin = ci_lo_90, xmax = ci_hi_90), height = 0, lwd = 1) +
   geom_point(aes(x = est), size = 1.5) +
   geom_vline(xintercept = 0, lty = 2) +
   facet_wrap(~family, ncol = 1, scales = "free") +
   scale_y_continuous("",
                      breaks = coefs_third2_31_basecontrol$plotorder,
                      labels = coefs_third2_31_basecontrol$outcome) +
   scale_x_continuous("Conservative vs. Liberal seed assignment among Moderates\n with Increasing assignment\n(95% and 90% CIs)") +
   coord_cartesian(xlim = c(-0.2, 0.2)) +
   theme_bw(base_family = "sans") +
   theme(strip.background = element_rect(fill = "white"))
)
ggsave(coefplot_third2_31_basecontrol,
       filename = "../results/coefplot_third2_31_basecontrol.png", width = 5, height = 8)
576
+
577
#### THE effect of CONSERVATIVE vs. LIBERAL assignment among MODERATE participants assigned to a CONSTANT sequence ####
coefs_third2_22_basecontrol <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.ac - attitude.neutral:recsys.pc" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_third2_22_basecontrol$outcome = outcome_labels$outcome[match(coefs_third2_22_basecontrol$layer3_specificoutcome,
                                                                   outcome_labels$specificoutcome)]

coefs_third2_22_basecontrol$family = outcome_labels$family[match(coefs_third2_22_basecontrol$layer3_specificoutcome,
                                                                 outcome_labels$specificoutcome)]

coefs_third2_22_basecontrol <- mutate(coefs_third2_22_basecontrol,
                                      family = factor(family, levels = c(#"Platform Interaction",
                                        "Policy Attitudes\n(unit scale, + is more conservative)"
                                        #"Media Trust\n(unit scale, + is more trusting)",
                                        #"Affective Polarization\n(unit scale, + is greater polarization)"
                                      ), ordered = T))

# Normal-approximation CIs around each estimate.
# BUGFIX: the 99% lower bound previously used qnorm(0.001); a two-sided 99% CI
# pairs qnorm(0.005) with qnorm(0.995). (The 99% columns are unused downstream.)
coefs_third2_22_basecontrol <- coefs_third2_22_basecontrol %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_third2_22_basecontrol):1
  )
# Export scalar quantity for inline use in the manuscript.
writeLines(as.character(round(100*abs(filter(coefs_third2_22_basecontrol,layer3_specificoutcome=="pro_fraction_chosen")$est),0)),con = "../results/beta_seed_pro_fraction_chosen_third2_22.tex",sep="%")
606
+
607
+
608
#### THE effect of CONSERVATIVE vs. LIBERAL assignment among MODERATE participants assigned to a CONSTANT sequence ####
# (A no-op filter() wrapper around the data was removed.)
(coefplot_third2_22_basecontrol <- ggplot(coefs_third2_22_basecontrol, aes(y = plotorder)) +
   geom_errorbarh(aes(xmin = ci_lo_95, xmax = ci_hi_95), height = 0, lwd = 0.5) +
   geom_errorbarh(aes(xmin = ci_lo_90, xmax = ci_hi_90), height = 0, lwd = 1) +
   geom_point(aes(x = est), size = 1.5) +
   geom_vline(xintercept = 0, lty = 2) +
   facet_wrap(~family, ncol = 1, scales = "free") +
   scale_y_continuous("",
                      breaks = coefs_third2_22_basecontrol$plotorder,
                      labels = coefs_third2_22_basecontrol$outcome) +
   scale_x_continuous("Conservative vs. Liberal seed assignment among Moderates\n with Constant assignment\n(95% and 90% CIs)") +
   coord_cartesian(xlim = c(-0.2, 0.2)) +
   theme_bw(base_family = "sans") +
   theme(strip.background = element_rect(fill = "white"))
)

# Clear the workspace before the next pipeline script runs.
rm(list = ls())
code/shorts/08_plot_shorts_figure.R ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Banner separating this script's output in the combined log.
cat(strrep('=', 80), '\n\n',
    'OUTPUT FROM: shorts/08_plot_shorts_figure.R', '\n\n',
    sep = '')
7
+
8
+ library(tidyverse)
9
+ library(janitor)
10
+ library(lubridate)
11
+ library(stargazer)
12
+ library(broom)
13
+ library(psych)
14
+ library(ggtext)
15
+ library(ggplot2)
16
+
17
# Custom plotting palette (optional): MIT brand colours plus viridis anchors.
red_mit    <- '#A31F34'
red_light  <- '#A9606C'
blue_mit   <- '#315485'
grey_light <- '#C2C0BF'
grey_dark  <- '#8A8B8C'
black      <- '#353132'
vpurple    <- "#440154FF"
vyellow    <- "#FDE725FF"
vgreen     <- "#21908CFF"
28
+
29
## MODEL RESULTS
coefs_basecontrol <- read_csv("../results/padj_basecontrol_pretty_ytrecs_may2024.csv")

# Lookup table mapping internal outcome codes to markdown display labels
# (rendered by ggtext in the figure below).
outcome_labels <- data.frame(
  outcome         = c("Minimum wage<br>index"),
  specificoutcome = c("mw_index"),
  family          = c(rep("Policy Attitudes<br>(unit scale, + is more conservative)", 1))
)
35
+
36
+
37
# HYP 1
#### THE effect of INCREASING vs. CONSTANT assignment among LIBERAL participants ####
coefs_hyp1 <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.pro:recsys.pi - attitude.pro:recsys.pc" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_hyp1$outcome = outcome_labels$outcome[match(coefs_hyp1$layer3_specificoutcome,
                                                  outcome_labels$specificoutcome)]

coefs_hyp1$family = outcome_labels$family[match(coefs_hyp1$layer3_specificoutcome,outcome_labels$specificoutcome)]

coefs_hyp1 <- mutate(coefs_hyp1,
                     family = factor(family,
                                     levels = c("Policy Attitudes<br>(unit scale, + is more conservative)"),
                                     ordered = T))

# Normal-approximation CIs. BUGFIX: the 99% lower bound used qnorm(0.001);
# a two-sided 99% CI pairs qnorm(0.005) with qnorm(0.995). (The 99% columns
# are not used in the combined plot below.)
coefs_hyp1 <- coefs_hyp1 %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_hyp1):1
  )
63
+
64
## HYP 2
#### THE effect of INCREASING vs. CONSTANT assignment among CONSERVATIVE participants ####
coefs_hyp2 <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.anti:recsys.ai - attitude.anti:recsys.ac" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_hyp2$outcome = outcome_labels$outcome[match(coefs_hyp2$layer3_specificoutcome,
                                                  outcome_labels$specificoutcome)]

coefs_hyp2$family = outcome_labels$family[match(coefs_hyp2$layer3_specificoutcome,
                                                outcome_labels$specificoutcome)]

coefs_hyp2 <- mutate(coefs_hyp2,
                     family = factor(family,
                                     levels = c("Policy Attitudes<br>(unit scale, + is more conservative)"),
                                     ordered = T))

# Normal-approximation CIs. BUGFIX: 99% lower bound corrected from
# qnorm(0.001) to qnorm(0.005) to match the qnorm(0.995) upper bound.
coefs_hyp2 <- coefs_hyp2 %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_hyp2):1
  )
90
+
91
# HYP 3
#### THE effect of INCREASING vs. CONSTANT assignment among MODERATE participants assigned to a LIBERAL sequence ####
coefs_hyp3 <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.pi - attitude.neutral:recsys.pc" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_hyp3$outcome = outcome_labels$outcome[match(coefs_hyp3$layer3_specificoutcome,
                                                  outcome_labels$specificoutcome)]

coefs_hyp3$family = outcome_labels$family[match(coefs_hyp3$layer3_specificoutcome,
                                                outcome_labels$specificoutcome)]

coefs_hyp3 <- mutate(coefs_hyp3,
                     family = factor(family,
                                     levels = c("Policy Attitudes<br>(unit scale, + is more conservative)"),
                                     ordered = T))

# Normal-approximation CIs. BUGFIX: 99% lower bound corrected from
# qnorm(0.001) to qnorm(0.005) to match the qnorm(0.995) upper bound.
coefs_hyp3 <- coefs_hyp3 %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_hyp3):1
  )
117
+
118
# HYP 4
#### THE effect of INCREASING vs. CONSTANT assignment among MODERATE participants assigned to a CONSERVATIVE sequence ####
coefs_hyp4 <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.ai - attitude.neutral:recsys.ac" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_hyp4$outcome = outcome_labels$outcome[match(coefs_hyp4$layer3_specificoutcome,
                                                  outcome_labels$specificoutcome)]

coefs_hyp4$family = outcome_labels$family[match(coefs_hyp4$layer3_specificoutcome,
                                                outcome_labels$specificoutcome)]

coefs_hyp4 <- mutate(coefs_hyp4,
                     family = factor(family,
                                     levels = c("Policy Attitudes<br>(unit scale, + is more conservative)"),
                                     ordered = T))

# Normal-approximation CIs. BUGFIX: 99% lower bound corrected from
# qnorm(0.001) to qnorm(0.005) to match the qnorm(0.995) upper bound.
coefs_hyp4 <- coefs_hyp4 %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_hyp4):1
  )
145
+
146
# HYP 5
#### THE effect of CONSERVATIVE vs. LIBERAL assignment among MODERATE participants assigned to an INCREASING sequence ####
coefs_hyp5 <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.ai - attitude.neutral:recsys.pi" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_hyp5$outcome = outcome_labels$outcome[match(coefs_hyp5$layer3_specificoutcome,
                                                  outcome_labels$specificoutcome)]

coefs_hyp5$family = outcome_labels$family[match(coefs_hyp5$layer3_specificoutcome,
                                                outcome_labels$specificoutcome)]

coefs_hyp5 <- mutate(coefs_hyp5,
                     family = factor(family,
                                     levels = c("Policy Attitudes<br>(unit scale, + is more conservative)"),
                                     ordered = T))

# Normal-approximation CIs. BUGFIX: 99% lower bound corrected from
# qnorm(0.001) to qnorm(0.005) to match the qnorm(0.995) upper bound.
coefs_hyp5 <- coefs_hyp5 %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_hyp5):1
  )
173
+
174
# HYP 6
#### THE effect of CONSERVATIVE vs. LIBERAL assignment among MODERATE participants assigned to a CONSTANT sequence ####
coefs_hyp6 <- coefs_basecontrol %>%
  filter(layer2_treatmentcontrast == "attitude.neutral:recsys.ac - attitude.neutral:recsys.pc" &
           layer3_specificoutcome != "overall")

# Attach display labels for the outcome and its outcome family.
coefs_hyp6$outcome = outcome_labels$outcome[match(coefs_hyp6$layer3_specificoutcome,
                                                  outcome_labels$specificoutcome)]

coefs_hyp6$family = outcome_labels$family[match(coefs_hyp6$layer3_specificoutcome,
                                                outcome_labels$specificoutcome)]

coefs_hyp6 <- mutate(coefs_hyp6,
                     family = factor(family,
                                     levels = c("Policy Attitudes<br>(unit scale, + is more conservative)"),
                                     ordered = T))

# Normal-approximation CIs. BUGFIX: 99% lower bound corrected from
# qnorm(0.001) to qnorm(0.005); stray trailing comma after plotorder removed.
coefs_hyp6 <- coefs_hyp6 %>%
  mutate(ci_lo_99 = est + qnorm(0.005)*se,
         ci_hi_99 = est + qnorm(0.995)*se,
         ci_lo_95 = est + qnorm(0.025)*se,
         ci_hi_95 = est + qnorm(0.975)*se,
         ci_lo_90 = est + qnorm(0.05)*se,
         ci_hi_90 = est + qnorm(0.95)*se,
         plotorder = nrow(coefs_hyp6):1
  )
200
+
201
# Stack the six per-hypothesis frames into one, tagging each row with the
# markdown labels used for axis/facet text.
all_coefs <- bind_rows(
  coefs_hyp1 %>% mutate(hypothesis = "**Increasing vs. Constant**<br>Liberal Seed<br>Liberal Ideologues",
                        Sample = "**Increasing vs. Constant**<br>Liberal Seed"),
  coefs_hyp2 %>% mutate(hypothesis = "**Increasing vs. Constant**<br>Conservative Seed<br>Conservative Ideologues",
                        Sample = "**Increasing vs. Constant**<br>Conservative Seed"),
  coefs_hyp3 %>% mutate(hypothesis = "**Increasing vs. Constant**<br>Liberal Seed<br>Moderates",
                        Sample = "**Increasing vs. Constant**<br>Liberal Seed"),
  coefs_hyp4 %>% mutate(hypothesis = "**Increasing vs. Constant**<br>Conservative Seed<br>Moderates",
                        Sample = "**Increasing vs. Constant**<br>Conservative Seed"),
  coefs_hyp5 %>% mutate(hypothesis = "**Conservative vs. Liberal**<br>Increasing Extremity<br>Moderates",
                        Sample = "**Conservative vs. Liberal**<br>Increasing Extremity"),
  coefs_hyp6 %>% mutate(hypothesis = "**Conservative vs. Liberal**<br>Constant Extremity<br>Moderates",
                        Sample = "**Conservative vs. Liberal**<br>Constant Extremity")
)
210
+
211
# Fix the display order of the six hypotheses.
hypothesis_order <- c("**Increasing vs. Constant**<br>Liberal Seed<br>Liberal Ideologues",
                      "**Increasing vs. Constant**<br>Conservative Seed<br>Conservative Ideologues",
                      "**Increasing vs. Constant**<br>Liberal Seed<br>Moderates",
                      "**Increasing vs. Constant**<br>Conservative Seed<br>Moderates",
                      "**Conservative vs. Liberal**<br>Increasing Extremity<br>Moderates",
                      "**Conservative vs. Liberal**<br>Constant Extremity<br>Moderates")

# Apply the ordering as factor levels.
all_coefs <- mutate(all_coefs,
                    hypothesis = factor(hypothesis, levels = hypothesis_order))
221
+
222
all_coefs <- all_coefs %>%
  mutate(
    # NOTE(review): rows 1 and 2 map to hyp1/hyp2 only because each
    # per-hypothesis frame currently contains a single outcome row —
    # fragile if more outcomes are ever added; confirm before extending.
    attitude = case_when(
      row_number() == 1 ~ "Liberal Ideologues",
      row_number() == 2 ~ "Conservative Ideologues",
      TRUE ~ "Moderates"
    ),
    # Significance flag: adjusted p < .05, with NA treated as not significant.
    # (Replaces redundant ifelse(..., T, F) + as.logical() round-trip.)
    alpha = replace_na(p.adj < 0.05, FALSE),
    # Colour key per sample; insignificant rows are grouped under "insig".
    Sample_color = as.character(Sample),
    Sample_color = replace(Sample_color, alpha == FALSE, "insig")
  )
235
+
236
# Derive the literal colour codes used with scale_color_identity() below.
all_coefs <- all_coefs %>%
  mutate(
    # CI/point colour: muted grey when the 95% CI straddles zero,
    # dark green when it excludes zero.
    sign_color = case_when(
      ci_lo_95 < 0 & ci_hi_95 > 0 ~ grey_dark,
      TRUE ~ "darkgreen"
    ),
    # Text-label colour by participant attitude group.
    attitude_color = case_when(
      attitude == "Liberal Ideologues" ~ blue_mit,
      attitude == "Conservative Ideologues" ~ red_mit,
      attitude == "Moderates" ~ "darkgreen"
    )
  )
252
+
253
+
254
# Fix the vertical ordering of the four Sample panels.
sample_levels <- c("**Increasing vs. Constant**<br>Liberal Seed",
                   "**Increasing vs. Constant**<br>Conservative Seed",
                   "**Conservative vs. Liberal**<br>Increasing Extremity",
                   "**Conservative vs. Liberal**<br>Constant Extremity")

all_coefs <- all_coefs %>%
  mutate(Sample = factor(Sample, levels = sample_levels, ordered = T))
264
+
265
+
266
# Shape key for each attitude group.
# (This object was previously built twice — a size-3 version immediately
# overwritten by a size-5 version; only the surviving version is kept.)
attitude_shapes <- data.frame(attitude = c("Liberal Ideologues", "Conservative Ideologues", "Moderates"))

# Plot the attitude shapes (legend strip, not written to disk here).
attitude_bar <- ggplot(attitude_shapes, aes(x = attitude)) +
  geom_point(aes(shape = attitude), size = 5) +
  scale_shape_manual(values = c("Liberal Ideologues" = 16, "Conservative Ideologues" = 17, "Moderates" = 15)) +
  theme_void() +
  theme(legend.position = "none")
285
+
286
# Combined interval plot across all six hypotheses.
combined_plot <- ggplot(all_coefs, aes(x = est, y = Sample, group = attitude, shape = attitude)) +
  # 95% CI. BUGFIX: `alpha = 0.8` was previously mapped inside aes(), which
  # routes the constant through an alpha *scale* (rescaling it) instead of
  # applying it literally; it is now a layer parameter.
  geom_errorbarh(aes(xmin = ci_lo_95, xmax = ci_hi_95, color = sign_color),
                 alpha = 0.8, height = 0, lwd = 1, position = position_dodge(width = 0.8)) +

  # 90% CI (thicker segment); same fix as above.
  geom_errorbarh(aes(xmin = ci_lo_90, xmax = ci_hi_90, color = sign_color),
                 alpha = 0.8, height = 0, lwd = 1.5, position = position_dodge(width = 0.8)) +

  # Points: insignificant estimates drawn slightly transparent.
  geom_point(aes(color = sign_color),
             size = 4, position = position_dodge(width = 0.8),
             alpha = ifelse(all_coefs$alpha, 1, 0.7)) +

  # Group labels above each point, coloured by attitude group.
  geom_text(data = all_coefs,
            aes(x = est, label = attitude, color = attitude_color),
            alpha = 1, size = 6,
            position = position_dodge(width = 0.8), vjust = -0.6) +

  geom_vline(xintercept = 0, lty = 2) +
  facet_wrap(~ family, ncol = 1, scales = "free") +
  coord_cartesian(xlim = c(-0.06, 0.18), clip = "off") +
  scale_x_continuous(" Minimum Wage Policy Effect Size\n(95% and 90% CIs)") +
  scale_color_identity() + # colour columns already hold literal colour codes
  labs(y = NULL) +
  theme_bw(base_family = "sans") +
  theme(strip.background = element_rect(fill = "white"),
        legend.position = "none",
        axis.text.y = element_markdown(color = "black", size = 16),
        axis.title.x = element_markdown(color = "black", size = 16),
        strip.text = element_markdown(size = 18)
  )
combined_plot
ggsave(combined_plot, filename = "../results/shorts_combined_intervals.pdf", width = 8.5, height = 5)
# Clear the workspace before the next pipeline script runs.
rm(list = ls())
323
+
324
+
325
+
326
+
code/supplemental/14_api_browser_comparison.R ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Banner separating this script's output in the combined log.
cat(strrep('=', 80), '\n\n',
    'OUTPUT FROM: supplemental/14_api_browser_comparison.R', '\n\n',
    sep = '')

# Load the API tree and the natural (browser-collected) tree datasets.
api.tree <- read.csv('../data/supplemental/api_tree.csv')
natural.tree <- readRDS('../data/supplemental/natural_tree.rds')
12
+
13
#############
# Figure S3 #
#############

# Weight each node by the number of (weighted) recommendation paths reaching it.
natural.tree$w <- NA
natural.tree$w[natural.tree$step == 1] <- 1 # All recs in the first step come from a common origin video
natural.tree$w[natural.tree$step == 2] <- 1 # All recs in the second step come from a unique origin (one of the 20 recs from the first video)

# If a recommendation appears in a step multiple times, we only get recs for that
# video once, but upweight those videos according to frequency.
for(i in 1:nrow(natural.tree)){
  if(natural.tree$step[i] <= 2) next # steps 1-2 already weighted above
  paths.to.rec <- (natural.tree$originID[i] == natural.tree$recID) & (natural.tree$step == natural.tree$step[i] - 1)
  # BUGFIX: a stray no-op `sum(...)` statement on its own line was removed here.
  natural.tree$w[i] <- sum(natural.tree$w[paths.to.rec])
}
29
+
30
+ weighted.means <- c(weighted.mean(natural.tree$in.api.tree[natural.tree$step == 1], w = natural.tree$w[natural.tree$step == 1]),
31
+ weighted.mean(natural.tree$in.api.tree[natural.tree$step == 2], w = natural.tree$w[natural.tree$step == 2]),
32
+ weighted.mean(natural.tree$in.api.tree[natural.tree$step == 3], w = natural.tree$w[natural.tree$step == 3]),
33
+ weighted.mean(natural.tree$in.api.tree[natural.tree$step == 4], w = natural.tree$w[natural.tree$step == 4]),
34
+ weighted.mean(natural.tree$in.api.tree[natural.tree$step == 5], w = natural.tree$w[natural.tree$step == 5])
35
+ )
36
+
37
+ pdf('../results/proportion_by_step_in_tree_weighted.pdf')
38
+ barplot(weighted.means,
39
+ main = 'Weighted Proportion of Natural Recs in API Tree (In Step)',
40
+ names.arg = c('Step 1', 'Step 2', 'Step 3', 'Step 4', 'Step 5'),
41
+ ylab = 'Proportion of Naturalistic Recs in API Tree',
42
+ ylim = c(0,1)
43
+ )
44
+ dev.off()
45
+
46
###############################
# Table in Appendix Section 4 #
###############################

set.seed(63110)
# Sample 10 step-5 recommendations that never appeared in the API tree.
to.label <- natural.tree[sample(which(natural.tree$in.api.tree == 0 & natural.tree$step == 5), 10),]
step.five.recs <- to.label$recID

# Wrap a video ID in \verb for LaTeX; append '***' when the ID is absent
# from the API tree. (Extracted from six copy-pasted blocks; same output.)
format.tree.id <- function(id){
  out <- paste0('\\verb|', id, '|')
  if(!(id %in% api.tree$RecID)) out <- paste0(out, '***')
  out
}

for(i in 1:nrow(to.label)){
  # Walk the recommendation chain back from the step-5 rec to its step-1 origin.
  step.six.origin   <- to.label$recID[i]
  step.five.origin  <- to.label$originID[i]
  step.four.origin  <- natural.tree$originID[natural.tree$step == 4 & natural.tree$recID == step.five.origin][1]
  step.three.origin <- natural.tree$originID[natural.tree$step == 3 & natural.tree$recID == step.four.origin][1]
  step.two.origin   <- natural.tree$originID[natural.tree$step == 2 & natural.tree$recID == step.three.origin][1]
  step.one.origin   <- natural.tree$originID[natural.tree$step == 1 & natural.tree$recID == step.two.origin][1]

  # Emit one LaTeX table row.
  row <- paste(format.tree.id(step.six.origin),
               format.tree.id(step.five.origin),
               format.tree.id(step.four.origin),
               format.tree.id(step.three.origin),
               format.tree.id(step.two.origin),
               format.tree.id(step.one.origin),
               sep = ' & ')
  row <- paste0(row, ' \\\\')

  cat(row)
  cat('\n')
}
code/supplemental/experiment durations/09_experiment_times.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
### Average Time Spent in Experiments
#
# Reads the cleaned wave-2 survey files for the four studies, repairs the
# interface end-time columns, winsorizes the interface durations at the
# 2.5%/97.5% quantiles, prints summary means, and saves one duration
# histogram per study (drawn with ggplot2 through rpy2).

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
from IPython.display import Image

print('=' * 80 + '\n\n' + 'OUTPUT FROM: supplemental/experiment durations/09_experiment_times.py' + '\n\n')

study1 = pd.read_csv('../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w123_clean.csv')
study2 = pd.read_csv('../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv')
study3 = pd.read_csv('../results/intermediate data/minimum wage (issue 2)/yg_w12_clean.csv')
study4 = pd.read_csv('../results/intermediate data/shorts/qualtrics_w12_clean_ytrecs_may2024.csv')

# ## Outlier Elimination
# Manually patch the guaranteed-incorrect values by taking the interface end
# time to be pmin(survey end time, raw interface end time).

_ISO_UTC_FMT = '%Y-%m-%dT%H:%M:%SZ'


def _to_naive_eastern(series):
    # Parse a UTC ISO-8601 timestamp column and convert it to
    # timezone-naive US/Eastern wall-clock time.
    return (pd.to_datetime(series, format=_ISO_UTC_FMT, errors='coerce')
            .dt.tz_localize('UTC')
            .dt.tz_convert('America/New_York')
            .dt.tz_localize(None))


def _fixed_end_time(df):
    # Elementwise "minimum" of the survey end time (end_date_w2) and the raw
    # interface end time (end_time2), as in the original combine() lambda.
    # NOTE(review): when end_date_w2 is NaT but end_time2 is present this
    # returns NaT rather than end_time2 — confirm that is intended.
    return df['end_date_w2'].combine(
        df['end_time2'],
        lambda x, y: x if pd.notna(y) and (pd.isna(x) or x < y) else y)


# Studies 1 and 2 (Qualtrics): coerce the timestamp columns, then repair the
# interface end time. (A redundant .where(notna, nan) pass from the original
# was a no-op and has been dropped.)
for _df in (study1, study2):
    _df['end_date_w2'] = pd.to_datetime(_df['end_date_w2'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
    _df['end_time2'] = _to_naive_eastern(_df['end_time2'])
    _df['start_time2'] = _to_naive_eastern(_df['start_time2'])
    _df['interface_end_time_fixed'] = _fixed_end_time(_df)

# Study 3 (YouGov): timestamps are parsed strictly (no errors='coerce', as in
# the original); take the elementwise minimum of the two candidate end times.
study3['start_date_w2'] = pd.to_datetime(study3['start_date_w2'], format=_ISO_UTC_FMT).dt.tz_localize(None)
study3['end_date_w2'] = pd.to_datetime(study3['end_date_w2'], format=_ISO_UTC_FMT).dt.tz_localize(None)
study3['end_time2'] = pd.to_datetime(study3['end_time2'], format=_ISO_UTC_FMT).dt.tz_localize(None)
study3['start_time2'] = pd.to_datetime(study3['start_time2'], format=_ISO_UTC_FMT).dt.tz_localize(None)
study3['interface_end_time_fixed'] = np.minimum(study3['end_date_w2'], study3['end_time2'])

# Study 4 ships numeric durations directly; interface_duration is divided by
# 60, so it is presumably in seconds — TODO confirm against the raw export.
study4['interface_time_fixed'] = np.minimum(study4['survey_time'], study4['interface_duration'] / 60)

# Studies 1-3: interface duration = fixed end time minus interface start
# time, then converted from a timedelta to minutes.
for _df in (study1, study2, study3):
    _df['interface_end_time_fixed'] = pd.to_datetime(_df['interface_end_time_fixed'], errors='coerce').dt.tz_localize(None)
    _df['start_time2'] = pd.to_datetime(_df['start_time2'], errors='coerce').dt.tz_localize(None)
    _df['interface_time_fixed'] = _df['interface_end_time_fixed'] - _df['start_time2']
    _df['interface_time_fixed_minutes'] = _df['interface_time_fixed'].dt.total_seconds() / 60


#### Winsorization
# (The original script first winsorized the raw 'duration' column into
# platform_duration and then immediately overwrote both the column and the
# quantile variables below; that dead first pass has been removed.)

def _winsorize(series):
    # Cap a numeric series at its own 2.5% and 97.5% quantiles.
    # Equivalent to the original elementwise lambda; NaNs pass through.
    lower = series.quantile(0.025)
    upper = series.quantile(0.975)
    return series.clip(lower=lower, upper=upper)


study1['platform_duration'] = _winsorize(study1['interface_time_fixed_minutes'])
study2['platform_duration'] = _winsorize(study2['interface_time_fixed_minutes'])
study3['platform_duration'] = _winsorize(study3['interface_time_fixed_minutes'])
study4['platform_duration'] = _winsorize(study4['interface_time_fixed'])

# Overall interface time spent (mean, Studies 1-3)
print('Mean Interface Time for Studies 1-3:', pd.concat([study1[study1.treatment_arm != 'control']['platform_duration'],
                                                         study2[study2.treatment_arm != 'control']['platform_duration'],
                                                         study3[study3.treatment_arm != 'control']['platform_duration']],
                                                        ignore_index=True).mean())

print('******')
# Overall interface time spent (mean, Studies 1-4)
print('Mean Interface Time for Studies 1-4:', pd.concat([study1[study1.treatment_arm != 'control']['platform_duration'],
                                                         study2[study2.treatment_arm != 'control']['platform_duration'],
                                                         study3[study3.treatment_arm != 'control']['platform_duration'],
                                                         study4['platform_duration']],
                                                        ignore_index=True).mean())

print('******')
# Interface time spent in each study
print('Study1 Interface:', study1[study1.treatment_arm != 'control']['platform_duration'].mean())
print('Study2 Interface:', study2[study2.treatment_arm != 'control']['platform_duration'].mean())
print('Study3 Interface:', study3[study3.treatment_arm != 'control']['platform_duration'].mean())
print('Study4 Interface:', study4['platform_duration'].mean())


### Plots

# Enable the pandas-to-R conversion once for all four plots (the original
# re-activated it before each plot, which is a harmless no-op).
pandas2ri.activate()


def _run_duration_plot(df, r_code):
    # Push the study dataframe into R as `w123` and run the supplied ggplot
    # code, which writes the histogram PDF as a side effect.
    robjects.globalenv['w123'] = pandas2ri.py2rpy(df)
    robjects.r(r_code)


# Study 1 histogram.
r_code = """
library(ggplot2)
library(dplyr)

# Filter the data
w123_filtered <- w123 %>% filter(treatment_arm != "control")

# Create the plot and save it as a PNG file
surveytime_plot <- ggplot(w123_filtered) +
    geom_histogram(aes(x = platform_duration, y = ..density.. / sum(..density..))) +
    scale_x_continuous("Interface Time Taken (minutes),\nexcluding control respondents", breaks = seq(0, 100, 20), limits = c(-1, 101)) +
    scale_y_continuous("Density") +
    geom_vline(xintercept = mean(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dashed", color = "red") +
    annotate("text", x = mean(w123_filtered$platform_duration, na.rm = TRUE) + 1, y = 0.13, label = paste0("Average: ", round(mean(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    geom_vline(xintercept = median(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dotted", color = "red") +
    annotate("text", x = median(w123_filtered$platform_duration , na.rm = TRUE) + 1, y = 0.16, label = paste0("Median: ", round(median(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    theme_minimal()
ggsave(surveytime_plot,filename = "../results/video_platform_duration_study1.pdf",height=3,width=5)
"""
_run_duration_plot(study1, r_code)
#Image(filename="video_platform_duration_study1.png")

# Study 2 histogram.
r_code = """
library(ggplot2)
library(dplyr)

# Filter the data
w123_filtered <- w123 %>% filter(treatment_arm != "control")

# Create the plot and save it as a PNG file
surveytime_plot <- ggplot(w123_filtered) +
    geom_histogram(aes(x = platform_duration, y = ..density.. / sum(..density..))) +
    scale_x_continuous("Interface Time Taken (minutes),\nexcluding control respondents", breaks = seq(0, 100, 20), limits = c(-1, 101)) +
    scale_y_continuous("Density") +
    geom_vline(xintercept = mean(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dashed", color = "red") +
    annotate("text", x = mean(w123_filtered$platform_duration, na.rm = TRUE) + 1, y = 0.13, label = paste0("Average: ", round(mean(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    geom_vline(xintercept = median(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dotted", color = "red") +
    annotate("text", x = median(w123_filtered$platform_duration , na.rm = TRUE) + 1, y = 0.16, label = paste0("Median: ", round(median(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    theme_minimal()

ggsave(surveytime_plot,filename = "../results/video_platform_duration_study2.pdf",height=3,width=5)
"""
_run_duration_plot(study2, r_code)
#Image(filename="video_platform_duration_study2.png")

# Study 3 histogram.
r_code = """
library(ggplot2)
library(dplyr)

# Filter the data
w123_filtered <- w123 %>% filter(treatment_arm != "control")

# Create the plot and save it as a PNG file
surveytime_plot <- ggplot(w123_filtered) +
    geom_histogram(aes(x = platform_duration, y = ..density.. / sum(..density..))) +
    scale_x_continuous("Interface Time Taken (minutes),\nexcluding control respondents", breaks = seq(0, 100, 20), limits = c(-1, 101)) +
    scale_y_continuous("Density") +
    geom_vline(xintercept = mean(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dashed", color = "red") +
    annotate("text", x = mean(w123_filtered$platform_duration, na.rm = TRUE) + 1, y = 0.13, label = paste0("Average: ", round(mean(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    geom_vline(xintercept = median(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dotted", color = "red") +
    annotate("text", x = median(w123_filtered$platform_duration , na.rm = TRUE) + 1, y = 0.16, label = paste0("Median: ", round(median(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    theme_minimal()
ggsave(surveytime_plot,filename = "../results/video_platform_duration_study3.pdf",height=3,width=5)
"""
_run_duration_plot(study3, r_code)
#Image(filename="video_platform_duration_study3.png")

# Study 4 histogram (no control arm exclusion note in the axis label).
r_code = """
library(ggplot2)
library(dplyr)

# Filter the data
w123_filtered <- w123 %>% filter(treatment_arm != "control")

# Create the plot and save it as a PNG file
surveytime_plot <- ggplot(w123_filtered) +
    geom_histogram(aes(x = platform_duration, y = ..density.. / sum(..density..))) +
    scale_x_continuous("Interface Time Taken (minutes)", breaks = seq(0, 100, 20), limits = c(-1, 101)) +
    scale_y_continuous("Density") +
    geom_vline(xintercept = mean(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dashed", color = "red") +
    annotate("text", x = mean(w123_filtered$platform_duration, na.rm = TRUE) + 1, y = 0.13, label = paste0("Average: ", round(mean(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    geom_vline(xintercept = median(w123_filtered$platform_duration, na.rm = TRUE), linetype = "dotted", color = "red") +
    annotate("text", x = median(w123_filtered$platform_duration , na.rm = TRUE) + 1, y = 0.16, label = paste0("Median: ", round(median(w123_filtered$platform_duration, na.rm = TRUE), 0), " minutes"), hjust = 0) +
    theme_minimal()
ggsave(surveytime_plot,filename = "../results/video_platform_duration_study4.pdf",height=3,width=5)
"""
_run_duration_plot(study4, r_code)
#Image(filename="video_platform_duration_study4.png")
code/supplemental/increasingly extreme recommendations/10_partisanship_increase.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
# coding: utf-8

# # Expected Value of Partisanship Increase
#
# Builds a (current video, recommended video) pair table for the minimum
# wage recommendation tree, attaching continuous GPT partisanship ratings to
# both ends of each pair and a per-origin recommendation-count weight.

# Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
import statsmodels.api as sm
from scipy.stats import t

print('=' * 80 + '\n\n' + 'OUTPUT FROM: supplemental/increasingly extreme recommendations/10_partisanship_increase.py' + '\n\n')


# Data from Youtube Rec
wage_data = pd.read_csv('../data/supplemental/metadata and ratings/metadata_with_lables_binary_only_checked_0410.csv')

# GPT label (continuous) --> full tree data
gpt_labels_cont = pd.read_csv("../data/supplemental/metadata and ratings/gpt_continuous_ratings_minwage_FULL_averaged (1).csv")[['naijia_originId', 'gpt_continuous']]
gpt_labels_cont = gpt_labels_cont.rename(columns={'naijia_originId': 'originID', 'gpt_continuous': 'gpt_label'})

# Collapse any duplicate video IDs to the mean of their ratings.
gpt_labels_cont = gpt_labels_cont.groupby('originID').agg({"gpt_label": "mean"}).reset_index()

# ## Min Wage Increase - average recommendation on the average video

# Each row is a (current, recommended) video pair; attach the current
# video's GPT rating first.
pairs = wage_data[['originID', 'recID', 'step']]
pairs = pairs.merge(gpt_labels_cont, how='left', on='originID').rename(columns={'gpt_label': 'gpt_label_originID'})


# Attach the recommended video's rating by joining recID against the rating
# table's originID column, then tidy up the suffixed key columns.
pairs = pairs.merge(gpt_labels_cont,
                    how='left',
                    left_on='recID',
                    right_on='originID').rename(columns={'gpt_label': 'gpt_label_recID',
                                                         'originID_x': 'originID'}).drop(columns=['originID_y'])

# Keep only pairs where both videos have a rating (idiomatic .notna() in
# place of the original `isnull() == False`).
pairs = pairs[pairs.gpt_label_originID.notna() & pairs.gpt_label_recID.notna()]

# Weight each origin video by its number of distinct recommendations.
weights = pairs.groupby('originID').agg({"recID":
                                         "nunique"}).reset_index().rename(columns={"recID":
                                                                                   "weight"})
pairs = pairs.merge(weights, how='left', on='originID')

# Difference = Recommended Score − Current Score; divided by the weight so
# heavily-recommended origins do not dominate.
pairs['difference'] = pairs['gpt_label_recID'] - pairs['gpt_label_originID']
pairs['weighted_difference'] = pairs['difference'] / pairs['weight']
55
+ ## liberal/conservative categorization
56
def label_category(row):
    """Classify a pair's current video: 'conservative' if its GPT rating is
    positive, otherwise 'liberal' (zero counts as liberal)."""
    return 'conservative' if row['gpt_label_originID'] > 0 else 'liberal'
61
+
62
pairs['label_category'] = pairs.apply(label_category, axis=1)

# Liberal current videos (GPT rating <= 0).
liberal = pairs[pairs.label_category == 'liberal']


# Constant column for the regression intercept.
lib_X = sm.add_constant(liberal['gpt_label_originID'])

# OLS of the weighted rating change on the current rating, with standard
# errors clustered two ways (origin video and recommended video).
model = sm.OLS(liberal['weighted_difference'], lib_X)
lib_results = model.fit(cov_type='cluster',
                        cov_kwds={'groups': [liberal['originID'].tolist(), liberal['recID'].tolist()]})
print(lib_results.summary())


# Conservative current videos (GPT rating > 0).
conservative = pairs[pairs.label_category == 'conservative']

# Constant column for the regression intercept.
cons_X = sm.add_constant(conservative['gpt_label_originID'])

# Same OLS specification for the conservative subset.
model = sm.OLS(conservative['weighted_difference'], cons_X)
cons_results = model.fit(cov_type='cluster',
                         cov_kwds={'groups': [conservative['originID'].tolist(), conservative['recID'].tolist()]})
print(cons_results.summary())


### SI Figure

fig, ax = plt.subplots()

lib_preds = lib_results.get_prediction(lib_X).summary_frame(alpha=0.05)
cons_preds = cons_results.get_prediction(cons_X).summary_frame(alpha=0.05)

# BUG FIX: fill_between() and plot() connect points in data order, so drawing
# the CI bands and fitted lines over an unsorted x axis produced
# self-intersecting polygons and zigzag lines. Sort each subset by the
# current-video rating before drawing them (scatter points are order-free).
lib_order = np.argsort(liberal['gpt_label_originID'].to_numpy())
cons_order = np.argsort(conservative['gpt_label_originID'].to_numpy())
lib_x = liberal['gpt_label_originID'].to_numpy()[lib_order]
cons_x = conservative['gpt_label_originID'].to_numpy()[cons_order]

# Raw pairs: x = current rating, y = current rating + weighted change.
ax.scatter(liberal['gpt_label_originID'],
           liberal['gpt_label_originID'] + liberal['weighted_difference'],
           color='lightblue', s=0.9, alpha=0.6)

ax.scatter(conservative['gpt_label_originID'],
           conservative['gpt_label_originID'] + conservative['weighted_difference'],
           color='lightcoral', s=0.9, alpha=0.6)

# 95% confidence bands around the fitted recommended rating.
ax.fill_between(lib_x,
                lib_x + lib_preds['mean_ci_lower'].to_numpy()[lib_order],
                lib_x + lib_preds['mean_ci_upper'].to_numpy()[lib_order],
                alpha=.4, color='blue')

ax.fill_between(cons_x,
                cons_x + cons_preds['mean_ci_lower'].to_numpy()[cons_order],
                cons_x + cons_preds['mean_ci_upper'].to_numpy()[cons_order],
                alpha=.4, color='red')

# Fitted lines for each subset.
ax.plot(lib_x,
        lib_preds['mean'].to_numpy()[lib_order] + lib_x,
        color='darkblue', linewidth=0.5)

ax.plot(cons_x,
        cons_preds['mean'].to_numpy()[cons_order] + cons_x,
        color='darkred', linewidth=0.5)

# 45-degree reference: recommended rating equals current rating.
ax.plot([-1, 1], [-1, 1], 'k--', linewidth=1.5)

# Customize the plot
ax.set_xlabel('Current Video Rating')
ax.set_ylabel('Recommended Video Rating')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.grid(False)
ax.set_title('Current Video Rating vs \n Recommended Video Rating')
plt.savefig('../results/video_rating_pairs.png', dpi=300, bbox_inches='tight')
code/supplemental/increasingly extreme recommendations/11_gpt_rating_plots.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
# coding: utf-8

"""
This file serves to plot Figures S12 and S13, which show the robustness of the GPT-generated ratings to different
ways of quantifying a video's political extremeness (BERT ratings and Hosseinmardi's channel labels.)
"""

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

print('=' * 80 + '\n\n' + 'OUTPUT FROM: supplemental/increasingly extreme recommendations/11_gpt_rating_plots.py' + '\n\n')

# Read in the GPT continuous ratings, along with the other ways of quantifying political extremeness
gpt_rating_wage = pd.read_csv("../data/supplemental/metadata and ratings/gpt_continuous_ratings_minwage.csv")
wage_videos_full = pd.read_csv("../data/supplemental/metadata and ratings/bert_rated_wage_videos_all.csv")

wage_all_merged = wage_videos_full.merge(gpt_rating_wage, on="naijia_originId")
# Deduplicate down to the columns the two figures actually use.
wage_all_merged = wage_all_merged[["naijia_originId", "homa_explanation_new", "gpt_label", "bert_score", "originCat"]].drop_duplicates()


"""
FIGURE S12: COMPARISON TO BERT MODEL FROM LAI ET AL. (2024)
"""
print("starting Figure S12...")

plt.figure(figsize=(8, 6))
# Color each point by the video's human-assigned category.
colors = {'pro': 'blue', 'anti': 'red'}

plt.scatter(wage_all_merged['gpt_label'], wage_all_merged['bert_score'], c=wage_all_merged['originCat'].map(colors), alpha=0.7)

# Quadrant guides at zero on both axes.
plt.axhline(0, color='gray', linestyle='--', linewidth=1.5)
plt.axvline(0, color='gray', linestyle='--', linewidth=1.5)

plt.xlabel('GPT Continuous')
plt.ylabel('BERT Score')
plt.title('Correlation between GPT Continuous and BERT Score')

# Empty scatters create one legend entry per category color.
for cat, color in colors.items():
    plt.scatter([], [], c=color, label=cat)
plt.legend()

plt.grid(True)
plt.savefig('../results/figure_S12_comparison_to_Lai_et_al_bert_ratings.png')
# BUG FIX: release the figure after saving so batch runs of the supplemental
# scripts do not accumulate open matplotlib figures.
plt.close()

print("...done!")


"""
FIGURE S13: COMPAIRSON WITH HOSSEINMARDI ET AL. (2021)
"""
print("starting Figure S13...")

fig, ax = plt.subplots(figsize=(12, 6))

# One histogram (with KDE overlay) per Hosseinmardi channel label.
for category, data in wage_all_merged.groupby('homa_explanation_new'):
    sns.histplot(data=data, x='gpt_label', label=category, kde=True, ax=ax, alpha=0.7)

plt.title("Distribution of GPT Continuous Scores by Hosseinmardi et al. (2021)", fontsize=16)
plt.xlabel('GPT Score', fontsize=14)
plt.ylabel('Frequency', fontsize=14)
plt.legend(title='Category', fontsize=12, title_fontsize=14)

plt.xlim(-1, 1)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)

ax.grid(True, linestyle='--', alpha=0.5)
sns.despine()
plt.tight_layout()
plt.savefig('../results/figure_S13_comparison_to_Hosseinmardi_et_al_channel_ratings.png')
# BUG FIX: close this figure too once it has been written to disk.
plt.close(fig)

print("...done!")
code/supplemental/thumbnails (first impressions)/12_thumbnail_analysis.py ADDED
@@ -0,0 +1,938 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # THUMBNAILS EXPERIMENT
5
+
6
+ # # Libraries
7
+
8
+ import numpy as np
9
+ import pandas as pd
10
+ import matplotlib.pyplot as plt
11
+ import seaborn as sns
12
+ import scipy.stats as stats
13
+ import json
14
+ import re
15
+ import os, glob
16
+ from collections import Counter
17
+ from statistics import mode
18
+ from sklearn.metrics import accuracy_score, precision_score, recall_score
19
+ from datetime import datetime
20
+ import gc
21
+ import statsmodels.api as sm
22
+ from stargazer.stargazer import Stargazer
23
+
24
+ import warnings
25
+ warnings.filterwarnings("ignore")
26
+
27
+ print('=' * 80 + '\n\n' + 'OUTPUT FROM: supplemental/thumbnails (first impressions)/12_thumbnail_analysis.py' + '\n\n')
28
+
29
+ # Reading Session Logs
30
+
31
+ # Session logs --- pIDHash is our new respondent-id
32
+ with open("../../data/platform session data/sessions.json") as json_file:
33
+ json_data = json.load(json_file)
34
+
35
+
36
+ unique_topics = []
37
+ real_data = []
38
+
39
+ for item in json_data:
40
+
41
+ # Check if the session is completed
42
+ if item['sessionFinished']:
43
+
44
+ # Check if the topic ID is 'min_wage' or 'gun_control'
45
+ if item['topicID'] in ['min_wage','gun_control']:
46
+
47
+ # Convert start time from milliseconds to seconds
48
+ unix_time_seconds_start = item['startTime'] / 1000
49
+
50
+ # Convert the UNIX timestamp to a datetime object
51
+ normal_time_start = datetime.fromtimestamp(unix_time_seconds_start)
52
+
53
+ # Check if the session started on May 23, 2024, after 10 AM
54
+ if (normal_time_start.year == 2024 and
55
+ normal_time_start.month == 5 and
56
+ normal_time_start.day == 23 and
57
+ normal_time_start.hour >= 10):
58
+
59
+ # Add the session to the real_data list
60
+ real_data.append(item)
61
+
62
+
63
+ del json_data
64
+ gc.collect()
65
+
66
+
67
+ print('Total session count:', len(real_data))
68
+
69
+
70
+ # # GPT Ratings
71
+
72
+
73
+ # GPT Labels
74
+ gpt_labels = pd.read_csv('../../data/supplemental/metadata and ratings/gpt_thumb_ratings_withHumanInfo.csv').drop_duplicates()
75
+
76
+ # in case of duplicate labels by GPT, take the first one
77
+ gpt_labels = gpt_labels.drop_duplicates(subset='originId', keep='first')
78
+
79
+ gpt_labels['gpt_thumb_rating'] = gpt_labels['gpt_thumb_rating'].replace('pro.', 'pro') # 2
80
+ gpt_labels['gpt_thumb_rating'] = gpt_labels['gpt_thumb_rating'].replace('anti.', 'anti')# 3
81
+ gpt_labels.gpt_thumb_rating.value_counts() # 1%
82
+
# # Gold Standard Labels

# these were the videos we actually provided to Jim
labels_on_platform = pd.concat([pd.read_csv('../../data/supplemental/metadata and ratings/gun_thumbnails_updated_v4(gun_control).csv'), pd.read_csv('../../data/supplemental/metadata and ratings/wage_thumbnails_updated_v4(min_wage).csv')])

# Actual ("Ground Truth") Labels
gun_videos_all_metadata = pd.read_csv('../../data/supplemental/metadata and ratings/metadata_w_label_June_2021_NLversion.csv')
wage_videos_all_metadata = pd.read_csv('../../data/supplemental/metadata and ratings/metadata_with_lables_binary_only_checked_0410.csv')
# Keep only the ID/label columns and rename the ID column to match the
# 'originId' spelling used by the GPT ratings table
gun_labels = gun_videos_all_metadata[['originID', 'originCat']].dropna().drop_duplicates().rename(columns={"originID": "originId"})
wage_labels = wage_videos_all_metadata[['originID', 'originCat']].dropna().drop_duplicates().rename(columns={"originID": "originId"})
# One combined gold-label table spanning both topics
gold_labels = pd.concat([gun_labels, wage_labels], axis = 0)
# # Curate a Subset of "Easy to Rate" Videos

bert_ratings = pd.read_csv('../../data/supplemental/metadata and ratings/bert_rated_wage_videos_all.csv')

# GPT's continuous extremeness scores for minimum-wage videos
# (variable name keeps the original 'extremenss' spelling used downstream)
gpt_continuous_extremenss_ratings = pd.read_csv('../../data/supplemental/metadata and ratings/gpt_continuous_ratings_minwage.csv').drop_duplicates()
gpt_continuous_extremenss_ratings = gpt_continuous_extremenss_ratings.rename(columns={"naijia_originId": "originID"})
gpt_continuous_extremenss_ratings = gpt_continuous_extremenss_ratings[["originID", "gpt_label"]]

bert_ratings = bert_ratings.rename(columns={"naijia_originId": "originID"})

# Inner join keeps only videos rated by both GPT and BERT
ratings_aggregated = pd.merge(gpt_continuous_extremenss_ratings, bert_ratings[["originID", "originCat", "bert_score"]], how= "inner", on = 'originID').drop_duplicates()

# convert gpt label to 'pro' if it is < 0 and 'anti' otherwise
ratings_aggregated['gpt_label'] = np.where(ratings_aggregated['gpt_label'] < 0, 'pro', 'anti')
# do the same thing for bert_score
ratings_aggregated['bert_score'] = np.where(ratings_aggregated['bert_score'] < 0, 'pro', 'anti')

# get cases where all 3 agree (GPT, BERT, and the original human label)
ratings_aggregated['all_agree'] = np.where((ratings_aggregated['gpt_label'] == ratings_aggregated['bert_score']) & (ratings_aggregated['gpt_label'] == ratings_aggregated['originCat']), 1, 0)


# This is a subset of minimum wage videos that is "easy to rate": GPT, BERT, and the human Turkers all got this correct

MINWAGE_AGREED_VIDEOS = ratings_aggregated[ratings_aggregated['all_agree'] == 1]

# get cases where GPT and BERT agree, but humans (originCat) disagrees
ratings_aggregated['gpt_bert_agree'] = np.where((ratings_aggregated['gpt_label'] == ratings_aggregated['bert_score']) & (ratings_aggregated['gpt_label'] != ratings_aggregated['originCat']), 1, 0)

# peek at specific videos
gpt_continuous_extremenss_ratings[gpt_continuous_extremenss_ratings["originID"]=="Z_r5TlBdjEM"]

# this happened in just 7 out of 154 cases...
ratings_aggregated['gpt_bert_agree'].value_counts()

# .... or 4.5%!
ratings_aggregated['gpt_bert_agree'].value_counts()[1]/len(ratings_aggregated)

ratings_aggregated[ratings_aggregated['gpt_bert_agree']==1]
# # Session Level Performance I

# Including videos without GPT labels (both no label and 'insufficient information' label cases)

def session_rep_counts(data):
    """Count finished rating sessions per topic and report unique respondents.

    Only sessions that are finished, belong to one of the two study topics,
    and contain at least one rating are counted. Prints the per-topic session
    counts and unique respondent counts.

    Returns a tuple (min_wage_indexes, gun_control_indexes) of positional
    indexes into `data`.
    """
    wage_topics, gun_topics = [], []
    wage_idx, gun_idx = [], []
    wage_resp, gun_resp = [], []

    for pos, session in enumerate(data):

        # Only completed sessions for our surveys, with at least one rating
        if session['sessionFinished'] and session['topicID'] in ['min_wage', 'gun_control'] and len(session['ratingResults']) > 0:

            topic = session['topicID']
            respondent = session['pIDHash']

            if topic == 'min_wage':
                wage_topics.append(topic)
                wage_resp.append(respondent)
                wage_idx.append(pos)
            elif topic == 'gun_control':
                gun_topics.append(topic)
                gun_resp.append(respondent)
                gun_idx.append(pos)

    print('Wage session count:', len(wage_idx))
    print('Gun session count:', len(gun_idx))

    print('Unique respondents (authID) in Wage:', len(np.unique(wage_resp)))
    print('Unique respondents (authID) in Gun', len(np.unique(gun_resp)))

    return wage_idx, gun_idx
def exp_analysis(data, index, total_count, pro_count, anti_count, insuf_count, nolabel_count, exc):
    """Score one session's ratings against the gold-standard labels.

    Parameters:
        data: list of session dicts; data[index]['ratingResults'] is iterated.
        index: position of the session to score within `data`.
        total_count, pro_count, anti_count, insuf_count, nolabel_count:
            running counters, incremented here and returned.
        exc: 0 to include videos without GPT labels; 1 to skip videos whose
            GPT label is 'insufficient data.' or whose gold label is null.

    Reads the module-level `gpt_labels` and `gold_labels` dataframes.

    Returns:
        (gold_matches, pro_matches, anti_matches, pro_count, anti_count,
         insuf_count, nolabel_count, total_count)
    """
    gold_matches = []
    pro_matches = []
    anti_matches = []

    for item in data[index]['ratingResults']:

        total_count += 1
        exp_index = int(item['index'])

        # Map the respondent's button index to a label; the pro/anti button
        # order was flipped between the two topics.
        # Fix: default to None so an unexpected exp_index no longer leaves
        # exp_label unbound (previously a NameError at the comparison below).
        exp_label = None
        if data[index]['topicID'] == 'min_wage':
            if exp_index == 1:
                exp_label = 'pro'
            elif exp_index == 2:
                exp_label = 'anti'
            elif exp_index == 3:
                exp_label = 'insufficient data.'
        elif data[index]['topicID'] == 'gun_control':
            if exp_index == 1:
                exp_label = 'anti'
            elif exp_index == 2:
                exp_label = 'pro'
            elif exp_index == 3:
                exp_label = 'insufficient data.'

        # Look up the GPT label; fall back to 'no_label' when the video is
        # absent from the table (narrowed from a bare except to IndexError).
        try:
            gpt_label = gpt_labels[gpt_labels.originId == item['vid']].gpt_thumb_rating.values[0]
        except IndexError:
            gpt_label = 'no_label'

        # Same for the gold-standard label.
        try:
            gold_label = gold_labels[gold_labels.originId == item['vid']].originCat.values[0]
        except IndexError:
            gold_label = 'no_label'

        # Tally the gold-label distribution.
        if gold_label == 'pro':
            pro_count += 1
        elif gold_label == 'anti':
            anti_count += 1
        elif gold_label == 'insufficient data.':
            insuf_count += 1
        else:
            nolabel_count += 1

        if exc == 0:
            gold_matches.append(1 if exp_label == gold_label else 0)
            # determine whether it's an anti or pro match
            if exp_label == 'pro':
                pro_matches.append(1 if exp_label == gold_label else 0)
            elif exp_label == 'anti':
                anti_matches.append(1 if exp_label == gold_label else 0)
        elif exc == 1:
            # Skip videos GPT judged uninformative or with no gold label.
            if gpt_label == 'insufficient data.' or pd.isnull(gold_label):
                continue
            else:
                gold_matches.append(1 if exp_label == gold_label else 0)

    return gold_matches, pro_matches, anti_matches, pro_count, anti_count, insuf_count, nolabel_count, total_count
def results(total_count,insuf_count,nolabel_count,pro_count,anti_count,gold_matches):
    """Print summary statistics for one comparison run.

    gold_matches is a list of 0/1 match indicators; the remaining arguments
    are the counters accumulated by exp_analysis.

    NOTE(review): the last two lines say 'matches with GPT' although callers
    pass gold-label matches — confirm the intended wording before relying on
    these printouts.
    """
    print('Number of videos:', total_count)
    print('Number of labeled videos:', total_count - insuf_count - nolabel_count)
    print('Number of pro videos:', pro_count)
    print('Number of anti videos:', anti_count)
    print('Number of vague videos:', insuf_count)
    print('Number of non labeled videos:', nolabel_count)
    print("***")
    print('Total number of matches with GPT:', np.sum(gold_matches))
    print("***")
    # Fix: guard against an empty match list (previously a ZeroDivisionError)
    if len(gold_matches) > 0:
        print('Total % of matches with GPT %', np.round(np.sum(gold_matches) / len(gold_matches), 2) * 100)
    print("***")
    print('')
# json_data is the json data
# exc 0 if we want to include videos without GPT labels, 1 otw.
def thumbnail_exp_check(data, exc=0):
    """Build a per-session dataframe of rating counts and gold-label matches.

    data: list of session dicts (one per respondent session).
    exc: forwarded to exp_analysis — 0 includes videos without GPT labels,
         1 excludes them.

    Returns one row per finished session with counts, match counts, and
    match percentages against the gold-standard labels.
    """
    # Empty frame with the full output schema; rows are appended per session.
    result_df = pd.DataFrame(columns = ['session_id',
                                        'topic_id',
                                        'respondent_id',
                                        'total_video_count',
                                        'respondent_label_count',
                                        'gold_insufficient_video_count',
                                        'gold_nolabel_video_count',
                                        'gold_pro_video_count',
                                        'gold_anti_video_count',
                                        'gold_match_count',
                                        'gold_pro_match_count',
                                        'gold_anti_match_count'
                                        ])

    print('Summary Statistics')
    exp_indexes_wage, exp_indexes_gun = session_rep_counts(data) # session and unique respondent counts

    # Check all matches for each experiment
    for index in exp_indexes_wage + exp_indexes_gun:

        if data[index]['sessionFinished']:

            if len(data[index]['ratingResults']) > 0:

                # Reset per-session counters before scoring this session
                pro_count, anti_count, insuf_count, nolabel_count, total_count = 0, 0, 0, 0, 0

                gold_matches, pro_matches, anti_matches, pro_count, anti_count, insuf_count, nolabel_count, total_count = exp_analysis(data, index, total_count, pro_count,
                                                                                                                                       anti_count, insuf_count, nolabel_count,
                                                                                                                                       exc=exc)

                resp_id = data[index]['pIDHash']

                # One output row for this session
                row = [index,
                       data[index]['topicID'],
                       resp_id,
                       len(data[index]['ratingResults']),
                       total_count,
                       insuf_count,
                       nolabel_count,
                       pro_count,
                       anti_count,
                       np.sum(gold_matches),
                       np.sum(pro_matches),
                       np.sum(anti_matches)
                       ]
                row_df = pd.DataFrame(row).T
                row_df.columns = ['session_id',
                                  'topic_id',
                                  'respondent_id',
                                  'total_video_count',
                                  'respondent_label_count',
                                  'gold_insufficient_video_count',
                                  'gold_nolabel_video_count',
                                  'gold_pro_video_count',
                                  'gold_anti_video_count',
                                  'gold_match_count',
                                  'gold_pro_match_count',
                                  'gold_anti_match_count'
                                  ]

                result_df = pd.concat([result_df, row_df], axis=0)

    # Per-session match percentages
    result_df['gold_match_perc'] = result_df['gold_match_count'] / result_df['total_video_count']
    ## look at the specific breakdown of pro versus anti
    result_df['gold_pro_match_perc'] = result_df['gold_pro_match_count'] / result_df['gold_pro_video_count']
    result_df['gold_anti_match_perc'] = result_df['gold_anti_match_count'] / result_df['gold_anti_video_count']

    return result_df
# SESSION LEVEL RESULT
result_df = thumbnail_exp_check(real_data, exc=0)

# NOTE(review): this counts one row per session, not per participant —
# confirm whether sessions and participants are 1:1 here
print("number of unique participants")
len(result_df)

result_df["total_video_count"].value_counts()
def calculate_quartiles(series):
    """Return the 25th/50th/75th percentiles of `series`, each rounded to 2 dp."""
    quartiles = series.quantile([0.25, 0.5, 0.75]).to_list()
    return [np.round(q, 2) for q in quartiles]
# Per-topic summary: session/respondent counts plus gold-match statistics
result_df.groupby('topic_id').agg(
    session_count=('session_id', 'nunique'),
    respondent_count=('respondent_id', 'nunique'),
    gold_match_mean=('gold_match_perc', 'mean'),
    gold_match_std=('gold_match_perc', 'std'),
    gold_match_quartiles=('gold_match_perc', calculate_quartiles)
).reset_index()

# examine the results by pro/anti
results_by_pro_anti = result_df.groupby('topic_id').agg(
    session_count=('session_id', 'nunique'),
    respondent_count=('respondent_id', 'nunique'),
    pro_gold_match_mean=('gold_pro_match_perc', 'mean'),
    pro_gold_match_std=('gold_pro_match_perc', 'std'),
    pro_gold_match_quartiles=('gold_pro_match_perc', calculate_quartiles),
    anti_gold_match_mean=('gold_anti_match_perc', 'mean'),
    anti_gold_match_std=('gold_anti_match_perc', 'std'),
    anti_gold_match_quartiles=('gold_anti_match_perc', calculate_quartiles)
).reset_index()

# Reshape to long format: one row per (topic, statistic)
melted_df = pd.melt(results_by_pro_anti,
                    id_vars=['topic_id', 'session_count', 'respondent_count'],
                    value_vars=[
                        'pro_gold_match_mean',
                        'pro_gold_match_std',
                        'pro_gold_match_quartiles',
                        'anti_gold_match_mean',
                        'anti_gold_match_std',
                        'anti_gold_match_quartiles'],
                    var_name='stat_type', value_name='value')
# Split e.g. 'pro_gold_match_mean' into type ('pro') and statistic ('mean')
melted_df['type'] = melted_df['stat_type'].apply(lambda x: x.split('_')[0])
melted_df['statistic'] = melted_df['stat_type'].apply(lambda x: x.split('_')[-1])
melted_df = melted_df.sort_values(by=['topic_id', 'statistic']).drop('stat_type', axis=1)
melted_df

# One-sample t-test of per-session match rates against chance (0.5)
print("t-test for gold match percentage (pooled)")
all_gold_match_perc = np.asarray([float(num) for num in result_df["gold_match_perc"]])
t_statistic, p_value = stats.ttest_1samp(a=all_gold_match_perc, popmean=0.5)
print("t-statistic:", t_statistic)
print("p-value:", p_value)

print("overall accuracy")
print(np.mean(all_gold_match_perc))

print("t-test for gold match percentage (liberal gun control; operationalizing random as 1/3)")
gun_result_df = result_df[result_df["topic_id"] == "gun_control"]
gun_gold_match_perc = np.asarray([float(num) for num in gun_result_df["gold_pro_match_perc"]])
t_statistic, p_value = stats.ttest_1samp(a=gun_gold_match_perc, popmean=0.333)
print("t-statistic:", t_statistic)
print("p-value:", p_value)

# Long-format frame of pro/anti match percentages for plotting
result_df_broken_by_proanti = result_df[["topic_id", "gold_pro_match_perc", "gold_anti_match_perc"]]
result_df_proanti_melted = pd.melt(result_df_broken_by_proanti,
                                   id_vars=['topic_id'],
                                   value_vars=[
                                       'gold_pro_match_perc',
                                       'gold_anti_match_perc'],
                                   var_name='stat_type', value_name='value')
result_df_proanti_melted['Video Gold Label'] = result_df_proanti_melted['stat_type'].apply(lambda x: x.split('_')[1])
result_df_proanti_melted = result_df_proanti_melted.drop('stat_type', axis=1)

sns.boxplot(x='topic_id', y='gold_match_perc', data=result_df)
plt.show()

# make the labels more understandable
result_df_proanti_melted["Video Gold Label"]= result_df_proanti_melted["Video Gold Label"].replace({"pro": "Liberal", "anti": "Conservative"})
result_df_proanti_melted["topic_id"]= result_df_proanti_melted["topic_id"].replace({"min_wage": "Minimum Wage", "gun_control": "Gun Control"})

sns.boxplot(x='topic_id', y='value', hue='Video Gold Label', data=result_df_proanti_melted)
# add a horizontal line for the 50% mark
plt.axhline(0.5, color='r', linestyle='--')
plt.title("Individual Raters' Percentage Match by Topic and Gold Label")
plt.xlabel("Topic")
plt.ylabel("Percentage Match with Gold")
plt.show()
# # Video Level Performance
exp_indexes_wage, exp_indexes_gun = session_rep_counts(real_data)

indexes = exp_indexes_wage + exp_indexes_gun
# videos maps topicID -> {video id -> list of respondent labels}
videos = {}
videos['gun_control'] = {}
videos['min_wage'] = {}

for index in indexes:

    ratings = real_data[index]['ratingResults']
    for rating in ratings:
        video = rating['vid']
        exp_index = int(rating['index'])

        # Map the button index to a label; the pro/anti button order was
        # flipped between the two topics (same mapping as exp_analysis)
        if real_data[index]['topicID'] == 'min_wage':
            if exp_index == 1:
                exp_label = 'pro'
            elif exp_index == 2:
                exp_label = 'anti'
            elif exp_index == 3:
                exp_label = 'insufficient data.'
        elif real_data[index]['topicID'] == 'gun_control':
            if exp_index == 1:
                exp_label = 'anti'
            elif exp_index == 2:
                exp_label = 'pro'
            elif exp_index == 3:
                exp_label = 'insufficient data.'

        # Accumulate every respondent label for this video under its topic
        if real_data[index]['topicID'] == 'min_wage':
            if video not in videos['min_wage'].keys():
                videos['min_wage'][video] = [exp_label]
            else:
                videos['min_wage'][video].append(exp_label)

        elif real_data[index]['topicID'] == 'gun_control':
            if video not in videos['gun_control'].keys():
                videos['gun_control'][video] = [exp_label]
            else:
                videos['gun_control'][video].append(exp_label)

# NOTE(review): `videos` is keyed by topic, so this prints the two topic IDs,
# not individual video IDs — confirm the loop-variable naming is intended
for video, labels in videos.items():
    print(video)
# Compute the majority (modal) label per video, plus supporting counts
majority_votes = {}
vote_counts = []
topics = []
num_votes_for_majority = []
majority_vote_drop_insuf = []

for topicid, videolist in videos.items():

    for video, labels in videolist.items():
        majority_vote = mode(labels)
        vote_count = len(labels)
        votes_for_majority = [label for label in labels if label == majority_vote]
        num_votes_for_majority.append(len(votes_for_majority))

        # a version of majority_vote if we remove votes for "insufficient data."
        majority_vote_drop_insuf.append(mode([label for label in labels if label != 'insufficient data.']))

        # Debug what happens when we have a very small minority as the majority percentage
        # how can the mode have only something like 35%?
        # Answer -- it's because there's a 3-way split between 'pro,' 'anti,' and 'insufficient data.'
        # if len(votes_for_majority) / vote_count < 0.4:
        #     print('Majority vote:', majority_vote)
        #     print('Votes for majority:', len(votes_for_majority))
        #     print('Total votes:', vote_count)
        #     print('Votes for majority %:', len(votes_for_majority) / vote_count)
        #     print('Labels:')
        #     print(pd.Series(labels).value_counts())
        #     print('***')

        majority_votes[video] = majority_vote
        vote_counts.append(vote_count)
        topics.append(topicid)

# Assemble one row per video from the parallel lists above
majority_voting = pd.concat([pd.Series(majority_votes.keys()),
                             pd.Series(topics),
                             pd.Series(vote_counts),
                             pd.Series(num_votes_for_majority),
                             pd.Series(majority_votes.values()),
                             pd.Series(majority_vote_drop_insuf)],
                            axis=1)
majority_voting.columns = ['originId','topicID','vote_count', 'num_votes_for_majority', 'majority_label', 'majority_label_drop_insuf']
print(majority_voting.shape)

# Attach the gold-standard and GPT labels per video
majority_voting = majority_voting.merge(gold_labels.drop_duplicates(), how='left',on='originId')
majority_voting = majority_voting.merge(gpt_labels.drop_duplicates(), how='left',on='originId')

majority_voting.head()
majority_voting.originCat.value_counts()
majority_voting.gpt_thumb_rating.value_counts()

# Encode the labels as integers for the sklearn metric functions below
majority_voting['gold_label_encoded'] = majority_voting['originCat'].map({'anti': 0,
                                                                          'pro': 1,
                                                                          'other': 2})

majority_voting['majority_label_encoded'] = majority_voting['majority_label'].map({'anti': 0,
                                                                                   'pro': 1,
                                                                                   'insufficient data.': 2})

majority_voting['majority_label_drop_insuf_encoded'] = majority_voting['majority_label_drop_insuf'].map({'anti': 0,
                                                                                                         'pro': 1})

majority_voting['gpt_encoded'] = majority_voting['gpt_thumb_rating'].map({'anti': 0,
                                                                          'pro': 1,
                                                                          'insufficient data.': 2})
def performance_metrics(filtered_df, topicID='OVERALL'):
    """Print accuracy/precision/recall of the majority label vs the gold label.

    filtered_df: dataframe with 'gold_label_encoded', 'majority_label_encoded'
    and 'topicID' columns.
    topicID: 'OVERALL' uses every row; otherwise rows are restricted to that
    topic. Precision/recall are weighted over label classes {0, 1, 2} with
    zero_division=0.
    """
    print('**********')
    print(f'{topicID}')

    # Fix: select the rows once instead of repeating the same filter
    # expression in every metric call (behavior unchanged).
    if topicID == 'OVERALL':
        subset = filtered_df
    else:
        subset = filtered_df[filtered_df.topicID == topicID]

    y_true = subset['gold_label_encoded']
    y_pred = subset['majority_label_encoded']

    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred,
                                labels=[0, 1, 2],
                                average='weighted',
                                zero_division=0)
    recall = recall_score(y_true, y_pred,
                          labels=[0, 1, 2],
                          average='weighted',
                          zero_division=0)

    print(f'Accuracy: {accuracy:.2f}')
    print(f'Precision: {precision:.2f}')
    print(f'Recall: {recall:.2f}')
    print('**********')
# Keep only videos with a non-null gold-standard rating
majority_voting_nonNullOriginCat = majority_voting[majority_voting.originCat.isnull() == False]

majority_voting_nonNullOriginCat["majority_label"].value_counts()

pro_only_maj_vote = majority_voting_nonNullOriginCat[majority_voting_nonNullOriginCat['originCat'] == 'pro']
anti_only_maj_vote = majority_voting_nonNullOriginCat[majority_voting_nonNullOriginCat['originCat'] == 'anti']
# flip all the labels for anti_only (so the encodings align across subsets)
anti_only_maj_vote = anti_only_maj_vote.copy()
anti_only_maj_vote.loc[:, 'majority_label_encoded'] = anti_only_maj_vote['majority_label_encoded'].replace({0: 1, 1: 0})
anti_only_maj_vote.loc[:, 'gold_label_encoded'] = anti_only_maj_vote['gold_label_encoded'].replace({0: 1, 1: 0})
anti_only_maj_vote.loc[:, 'majority_label_drop_insuf_encoded'] = anti_only_maj_vote['majority_label_drop_insuf_encoded'].replace({0: 1, 1: 0})

print("number of videos that had a valid original rating")
print(len(majority_voting_nonNullOriginCat))

# Per-video indicator: does the crowd's majority label match the gold label?
majority_voting_nonNullOriginCat.loc[:, "is_human_match"] = majority_voting_nonNullOriginCat["gold_label_encoded"] == majority_voting_nonNullOriginCat["majority_label_encoded"]

print("t-test for gold match percentage (general, majority vote)")
all_gold_match_perc = np.asarray([float(num) for num in majority_voting_nonNullOriginCat["is_human_match"]])
t_statistic, p_value = stats.ttest_1samp(a=all_gold_match_perc, popmean=0.5)
print("t-statistic:", t_statistic)
print("p-value:", p_value)

print('Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics(majority_voting_nonNullOriginCat, topicID='OVERALL')
performance_metrics(majority_voting_nonNullOriginCat, topicID='min_wage')
performance_metrics(majority_voting_nonNullOriginCat, topicID='gun_control')

print('PRO VIDEOS: Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics(pro_only_maj_vote, topicID='OVERALL')
performance_metrics(pro_only_maj_vote, topicID='min_wage')
performance_metrics(pro_only_maj_vote, topicID='gun_control')

print('ANTI VIDEOS: Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics(anti_only_maj_vote, topicID='OVERALL')
performance_metrics(anti_only_maj_vote, topicID='min_wage')
performance_metrics(anti_only_maj_vote, topicID='gun_control')
# # Gold Standard v. Majority Label (Dropping Insufficient Data)

def performance_metrics_drop_insuf(filtered_df, topicID='OVERALL'):
    """Print accuracy/precision/recall of the insufficient-votes-dropped
    majority label vs the gold label.

    Same as performance_metrics but compares
    'majority_label_drop_insuf_encoded' over binary classes {0, 1}.
    """
    print('**********')
    print(f'{topicID}')

    # Fix: select the rows once instead of repeating the same filter
    # expression in every metric call (behavior unchanged).
    if topicID == 'OVERALL':
        subset = filtered_df
    else:
        subset = filtered_df[filtered_df.topicID == topicID]

    y_true = subset['gold_label_encoded']
    y_pred = subset['majority_label_drop_insuf_encoded']

    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred,
                                labels=[0, 1],
                                average='weighted',
                                zero_division=0)
    recall = recall_score(y_true, y_pred,
                          labels=[0, 1],
                          average='weighted',
                          zero_division=0)

    print(f'Accuracy: {accuracy:.2f}')
    print(f'Precision: {precision:.2f}')
    print(f'Recall: {recall:.2f}')
    print('**********')
print('Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics_drop_insuf(majority_voting_nonNullOriginCat, topicID='OVERALL')
performance_metrics_drop_insuf(majority_voting_nonNullOriginCat, topicID='min_wage')
performance_metrics_drop_insuf(majority_voting_nonNullOriginCat, topicID='gun_control')

print('PRO VIDEOS: Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics_drop_insuf(pro_only_maj_vote, topicID='OVERALL')
performance_metrics_drop_insuf(pro_only_maj_vote, topicID='min_wage')
performance_metrics_drop_insuf(pro_only_maj_vote, topicID='gun_control')

print('ANTI VIDEOS: Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics_drop_insuf(anti_only_maj_vote, topicID='OVERALL')
performance_metrics_drop_insuf(anti_only_maj_vote, topicID='min_wage')
performance_metrics_drop_insuf(anti_only_maj_vote, topicID='gun_control')


# # Gold Standard v. Majority Label ("Easy" Subset)
# Restrict to the min-wage videos where GPT, BERT and humans all agreed
majority_voting_easyOnly = majority_voting_nonNullOriginCat[majority_voting_nonNullOriginCat["originId"].isin(MINWAGE_AGREED_VIDEOS["originID"])]

# create the pro- and anti-only sets
pro_only_maj_vote_easy = majority_voting_easyOnly[majority_voting_easyOnly['originCat'] == 'pro']
anti_only_maj_vote_easy = majority_voting_easyOnly[majority_voting_easyOnly['originCat'] == 'anti']
# flip all the labels for anti_only
anti_only_maj_vote_easy = anti_only_maj_vote_easy.copy()
anti_only_maj_vote_easy.loc[:, 'majority_label_encoded'] = anti_only_maj_vote_easy['majority_label_encoded'].replace({0: 1, 1: 0})
anti_only_maj_vote_easy.loc[:, 'gold_label_encoded'] = anti_only_maj_vote_easy['gold_label_encoded'].replace({0: 1, 1: 0})
anti_only_maj_vote_easy.loc[:, 'majority_label_drop_insuf_encoded'] = anti_only_maj_vote_easy['majority_label_drop_insuf_encoded'].replace({0: 1, 1: 0})

print('Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics(majority_voting_easyOnly, topicID='OVERALL')

print('PRO VIDEOS: Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics_drop_insuf(pro_only_maj_vote_easy, topicID='OVERALL')

print('ANTI VIDEOS: Comparing MTURKERS AND OLD GOLD STANDARD LABELS')
performance_metrics_drop_insuf(anti_only_maj_vote_easy, topicID='OVERALL')
# # Gold Standard versus GPT
def performance_gpt(filtered_df, topicID='OVERALL'):
    """Print accuracy/precision/recall of GPT's label vs the gold label.

    Same as performance_metrics but compares 'gpt_encoded' against
    'gold_label_encoded' over label classes {0, 1, 2}.
    """
    print('**********')
    print(f'{topicID}')

    # Fix: select the rows once instead of repeating the same filter
    # expression in every metric call (behavior unchanged).
    if topicID == 'OVERALL':
        subset = filtered_df
    else:
        subset = filtered_df[filtered_df.topicID == topicID]

    y_true = subset['gold_label_encoded']
    y_pred = subset['gpt_encoded']

    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred,
                                labels=[0, 1, 2],
                                average='weighted',
                                zero_division=0)
    recall = recall_score(y_true, y_pred,
                          labels=[0, 1, 2],
                          average='weighted',
                          zero_division=0)

    print(f'Accuracy: {accuracy:.2f}')
    print(f'Precision: {precision:.2f}')
    print(f'Recall: {recall:.2f}')
    print('**********')
# Keep only videos GPT actually rated (non-null GPT label)
majority_voting_nonNullGPT = majority_voting_nonNullOriginCat[majority_voting_nonNullOriginCat.gpt_thumb_rating.isnull()==False]
pro_only_maj_vote = majority_voting_nonNullGPT[majority_voting_nonNullGPT['originCat'] == 'pro']
anti_only_maj_vote = majority_voting_nonNullGPT[majority_voting_nonNullGPT['originCat'] == 'anti']

# flip all the labels for anti_only
anti_only_maj_vote = anti_only_maj_vote.copy()
anti_only_maj_vote.loc[:, 'gold_label_encoded'] = anti_only_maj_vote['gold_label_encoded'].replace({0: 1, 1: 0})
anti_only_maj_vote.loc[:, 'gpt_encoded'] = anti_only_maj_vote['gpt_encoded'].replace({0: 1, 1: 0})

print("number of videos that gpt rated")
print(len(majority_voting_nonNullGPT))

print('Comparing GPT AND OLD GOLD STANDARD LABELS')
performance_gpt(majority_voting_nonNullGPT, topicID='OVERALL')
performance_gpt(majority_voting_nonNullGPT, topicID='min_wage')
performance_gpt(majority_voting_nonNullGPT, topicID='gun_control')

print('PRO VIDEOS: Comparing GPT AND OLD GOLD STANDARD LABELS')
performance_gpt(pro_only_maj_vote, topicID='OVERALL')
performance_gpt(pro_only_maj_vote, topicID='min_wage')
performance_gpt(pro_only_maj_vote, topicID='gun_control')

print('ANTI VIDEOS: Comparing GPT AND OLD GOLD STANDARD LABELS')
performance_gpt(anti_only_maj_vote, topicID='OVERALL')
performance_gpt(anti_only_maj_vote, topicID='min_wage')
performance_gpt(anti_only_maj_vote, topicID='gun_control')

# Per-video match indicators for humans (two variants) and GPT
print("t-test for gold match percentage (general, majority vote)")
majority_voting_nonNullGPT.loc[:, "is_human_match"] = majority_voting_nonNullGPT["gold_label_encoded"] == majority_voting_nonNullGPT["majority_label_encoded"]
majority_voting_nonNullGPT.loc[:, "is_human_match_drop_insuf"] = majority_voting_nonNullGPT["gold_label_encoded"] == majority_voting_nonNullGPT["majority_label_drop_insuf_encoded"]
majority_voting_nonNullGPT.loc[:, "is_gpt_match"] = majority_voting_nonNullGPT["gold_label_encoded"] == majority_voting_nonNullGPT["gpt_encoded"]

# Test human match rate against GPT's match rate as the reference mean
print("Humans v. GPT")
all_gold_match_perc = np.asarray([float(num) for num in majority_voting_nonNullGPT["is_human_match"]])
t_statistic, p_value = stats.ttest_1samp(a=all_gold_match_perc, popmean=np.mean(majority_voting_nonNullGPT["is_gpt_match"]))
print("t-statistic:", t_statistic)
print("p-value:", p_value)

print("Humans (with 'Insufficient Data' Dropped) v. GPT")
all_gold_match_perc = np.asarray([float(num) for num in majority_voting_nonNullGPT["is_human_match_drop_insuf"]])
t_statistic, p_value = stats.ttest_1samp(a=all_gold_match_perc, popmean=np.mean(majority_voting_nonNullGPT["is_gpt_match"]))
print("t-statistic:", t_statistic)
print("p-value:", p_value)
# # What percentage of video thumbnails were 'clearly partisan?'
# Share of raters who voted for the winning label, per video
majority_voting['Majority Vote Percentage'] = majority_voting['num_votes_for_majority'] / majority_voting['vote_count']

# plot a density of the majority vote percentage by gold label
sns.kdeplot(data=majority_voting, x='Majority Vote Percentage', hue='originCat')
# add a vertical line at 0.5
plt.axvline(0.5, color='r', linestyle='--')
plt.axvline(0.8, color='lightpink', linestyle='--')
plt.show()

majority_voting_wage = majority_voting[majority_voting.topicID == 'min_wage']
majority_voting_gun = majority_voting[majority_voting.topicID == 'gun_control']

# Same plot restricted to gun-control videos
sns.kdeplot(data=majority_voting_gun, x='Majority Vote Percentage', hue='originCat')
# add a vertical line at 0.5
plt.axvline(0.5, color='r', linestyle='--')
plt.axvline(0.8, color='lightpink', linestyle='--')
plt.show()
# ## Can we connect it with the original choices people made?

# functions to get the original trees
def explore_branches(tree_df, all_trees_by_ID, all_trees_by_channelID, row_index = 0, step = 0):
    """Recursively walk one recommendation tree, collecting the video IDs and
    channel IDs seen at each depth.

    tree_df: dataframe with 'originId', 'originChannelId' and 'rec1'..'rec4'
        columns, one row per video.
    all_trees_by_ID / all_trees_by_channelID: dicts mapping step -> set of
        IDs; mutated in place.
    row_index: row to start from; step: current tree depth.
    """
    parent = tree_df.iloc[row_index]["originId"] # start with the given row
    channel_id = tree_df.iloc[row_index]["originChannelId"]

    # create set of unique keys per step
    if step not in all_trees_by_ID.keys():
        all_trees_by_ID[step] = set()
    if step not in all_trees_by_channelID.keys():
        all_trees_by_channelID[step] = set()

    all_trees_by_ID[step].add(parent) # add the parent's video ID to the relevant step
    all_trees_by_channelID[step].add(channel_id) # also store the channel ID

    for i in range(1, 4 + 1):  # the four recommendation slots rec1..rec4
        child_node = tree_df.iloc[row_index]["rec" + str(i)]

        # break if we hit a cycle
        if child_node in set().union(*all_trees_by_ID.values()):
            break
        else:
            child_row_index = tree_df.index[tree_df['originId'] == child_node].tolist()
            # Fix: skip recommendations with no row of their own instead of
            # crashing on an empty index lookup (child_row_index[0]).
            if not child_row_index:
                continue
            explore_branches(tree_df, all_trees_by_ID, all_trees_by_channelID, child_row_index[0], step + 1) # recurse to the next tree level
802
+ def read_all_trees(tree_files):
803
+
804
+ all_trees_by_ID = {}
805
+ all_trees_by_channelID = {}
806
+ all_tree_files_df = pd.DataFrame()
807
+
808
+ for tree in tree_files:
809
+ print(tree)
810
+
811
+ # populate the tree
812
+ tree_df = pd.read_csv(tree)
813
+
814
+ explore_branches(tree_df, all_trees_by_ID, all_trees_by_channelID) # recursively parse out video IDs and channel IDs from the trees.
815
+
816
+ # save the dataframe to all_tree_files_df
817
+ if(all_tree_files_df.empty):
818
+ all_tree_files_df = tree_df
819
+ else:
820
+ all_tree_files_df = pd.concat([all_tree_files_df, tree_df], axis=0)
821
+
822
+ return all_trees_by_ID, all_trees_by_channelID
823
+
824
+ tree_files_wage = glob.glob(os.path.join('../recommendation_trees/trees_wage/', '*.csv'))
825
+ tree_files_gun = glob.glob(os.path.join('../recommendation_trees/trees_gun/', '*.csv'))
826
+
827
+
828
+ all_trees_by_ID_wage, all_trees_by_channelID_wage = read_all_trees(tree_files_wage)
829
+
830
+ all_trees_by_ID_gun, all_trees_by_channelID_gun = read_all_trees(tree_files_gun)
831
+
832
+ # get the thing in parentheses as the topicid
833
+ gun_topicids = [re.search(r'\((.*?)\)', filename).group(1) for filename in tree_files_gun]
834
+ wage_topicids = [re.search(r'\((.*?)\)', filename).group(1) for filename in tree_files_wage]
835
+
836
+
837
+ # # Thumbnail Distribution
838
+
839
+ print('Total number of videos shown:',
840
+ len(videos['gun_control']) + len(videos['min_wage']))
841
+
842
+ print('Percentage of videos shown: %',
843
+ np.round((len(videos['gun_control']) + len(videos['min_wage'])) / len(labels_on_platform), 3) * 100)
844
+
845
+ unique_videos_shown = set(list(videos['gun_control'].keys()) + list(videos['min_wage'].keys()))
846
+ unique_videos_in_platform_set = set(labels_on_platform['originId'])
847
+
848
+ len(labels_on_platform)
849
+ len(unique_videos_in_platform_set)
850
+ unique_videos_in_platform_set.difference(unique_videos_shown)
851
+
852
+ video_lengths = {}
853
+ for vids, labellist in videos.items():
854
+ for vid, labels in labellist.items():
855
+ video_lengths[vid] = len(labels)
856
+
857
+
858
+ average_number_of_ratings = np.mean([int(val) for val in video_lengths.values()])
859
+ average_number_of_ratings
860
+
861
+ # plot a histogram of video_lengths.values()
862
+ plt.hist(video_lengths.values(), bins=40)
863
+ # vertical line around the mean (average_number_of_ratings)
864
+ plt.axvline(average_number_of_ratings, color='r', linestyle='dashed', linewidth=1)
865
+ plt.xlabel('Number of Ratings')
866
+ plt.ylabel('Number of Videos')
867
+
868
+
869
+ # # Exploratory Analyses
870
+
871
+ # ## What happens when we drop the cases where someone (either human or GPT) said there wasn't enough information?
872
+ # When a rater abstains by saying there wasn't enough information, it deflates all the metrics, since the gold standard ratings are all binary.
873
+
874
+ majority_voting_rated = majority_voting_nonNullGPT[(majority_voting_nonNullGPT.majority_label != "insufficient data.") & (majority_voting_nonNullGPT.gpt_thumb_rating != "insufficient data.")]
875
+ len(majority_voting_rated) / len(majority_voting_nonNullGPT) # we now have 90% of the original videos
876
+
877
+ len(majority_voting_rated)
878
+
879
+ print('Comparing MTURKERS AND OLD GOLD STANDARD LABELS -- FILTERED')
880
+ performance_metrics(majority_voting_rated, topicID='OVERALL')
881
+ performance_metrics(majority_voting_rated, topicID='min_wage')
882
+ performance_metrics(majority_voting_rated, topicID='gun_control')
883
+
884
+ print('Comparing GPT AND OLD GOLD STANDARD LABELS -- FILTERED')
885
+ performance_gpt(majority_voting_rated, topicID='OVERALL')
886
+ performance_gpt(majority_voting_rated, topicID='min_wage')
887
+ performance_gpt(majority_voting_rated, topicID='gun_control')
888
+
889
+
890
+ # ## Are there any weird patterns that we should filter out?
891
+
892
+ # For example, we might want to explore cases where people answered the same thing all 20 times, or always said 'insufficient information.'
893
+
894
+ INDEXES_TO_EXCLUDE = set() # keep track of indices that we should exclude for various reasons
895
+
896
+ exp_indexes_wage, exp_indexes_gun = session_rep_counts(real_data)
897
+ indexes = exp_indexes_wage + exp_indexes_gun
898
+
899
+ for index in indexes:
900
+
901
+ ratings = real_data[index]['ratingResults']
902
+ ratings_indices = [ratings[i]['index'] for i in range(0, len(ratings))]
903
+
904
+ # These people rated the same thing for all questions
905
+ if len(set(ratings_indices)) == 1:
906
+ INDEXES_TO_EXCLUDE.add(index)
907
+
908
+ # These people saw the same video multiple times but had inconsistent answers
909
+ rating_dict_for_individual = {}
910
+ for rating in ratings:
911
+ if(rating['vid'] not in rating_dict_for_individual.keys()):
912
+ rating_dict_for_individual[rating['vid']] = []
913
+ rating_dict_for_individual[rating['vid']].append(rating['index'])
914
+
915
+ # identify if any keys in rating_dict_for_individual have a length greater than 1
916
+ for key in rating_dict_for_individual.keys():
917
+ if len(rating_dict_for_individual[key]) > 1:
918
+ if(len(set(rating_dict_for_individual[key])) > 1): # these people had inconsistent responses when rating the same video
919
+ INDEXES_TO_EXCLUDE.add(index)
920
+
921
+
922
+ # indices that we exclude for the above data quality reasons (button-smashing and inconsistent responses)
923
+ INDEXES_TO_EXCLUDE
924
+
925
+ indexes_cleaned = [index for index in indexes if index not in INDEXES_TO_EXCLUDE]
926
+
927
+ # filter real_data to indexes_cleaned
928
+ real_data_cleaned = [real_data[index] for index in indexes_cleaned]
929
+ result_df_cleaned = thumbnail_exp_check(real_data_cleaned)
930
+
931
+ # We don't actually do much better because there were only 7 participants dropped for data quality issues
932
+ result_df_cleaned.groupby('topic_id').agg(
933
+ session_count=('session_id', 'nunique'),
934
+ respondent_count=('respondent_id', 'nunique'),
935
+ gold_match_mean=('gold_match_perc', 'mean'),
936
+ gold_match_std=('gold_match_perc', 'std'),
937
+ gold_match_quartiles=('gold_match_perc', calculate_quartiles)
938
+ ).reset_index()
code/supplemental/thumbnails (first impressions)/13_thumbnail_null_comparison.py ADDED
@@ -0,0 +1,893 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Re-analysis of Original Experiments to Evaluate Randomness of Video Recommendation Choices
5
+
6
+ # # Libraries
7
+ import numpy as np
8
+ import pandas as pd
9
+ import matplotlib.pyplot as plt
10
+ import seaborn as sns
11
+ import scipy.stats as stats
12
+ import json
13
+ import re
14
+ import os, glob
15
+ from collections import Counter
16
+ from statistics import mode
17
+ from sklearn.metrics import accuracy_score, precision_score, recall_score
18
+ from datetime import datetime
19
+ import gc
20
+ import statsmodels.api as sm
21
+ from stargazer.stargazer import Stargazer
22
+ import math
23
+ from tqdm import tqdm
24
+ from collections import defaultdict
25
+ import random
26
+
27
+ import warnings
28
+ warnings.filterwarnings("ignore")
29
+
30
+ print('=' * 80 + '\n\n' + 'OUTPUT FROM: supplemental/thumbnails (first impressions)/13_thumbnail_null_comparison.py' + '\n\n')
31
+
32
+ with open("../../data/platform session data/sessions.json") as json_file:
33
+ json_data = json.load(json_file)
34
+
35
+ # Actual ("Ground Truth") Labels
36
+ gun_videos_all_metadata = pd.read_csv('../../data/supplemental/metadata and ratings/metadata_w_label_June_2021_NLversion.csv')
37
+ wage_videos_all_metadata = pd.read_csv('../../data/supplemental/metadata and ratings/metadata_with_lables_binary_only_checked_0410.csv')
38
+ gun_labels = gun_videos_all_metadata[['originID', 'originCat']].dropna().drop_duplicates().rename(columns={"originID": "originId"})
39
+ wage_labels = wage_videos_all_metadata[['originID', 'originCat']].dropna().drop_duplicates().rename(columns={"originID": "originId"})
40
+ gold_labels = pd.concat([gun_labels, wage_labels], axis = 0)
41
+
42
+ # functions to get the original trees
43
+ def explore_branches(tree_df, all_trees_by_ID, all_trees_by_channelID, row_index = 0, step = 0):
44
+ parent = tree_df.iloc[row_index]["originId"] # start with the first row
45
+ channel_id = tree_df.iloc[row_index]["originChannelId"]
46
+
47
+ # create set of unique keys per step
48
+ if step not in all_trees_by_ID.keys():
49
+ all_trees_by_ID[step] = set()
50
+ if step not in all_trees_by_channelID.keys():
51
+ all_trees_by_channelID[step] = set()
52
+
53
+ all_trees_by_ID[step].add(parent) # add the parent's video ID to the relevant step
54
+ all_trees_by_channelID[step].add(channel_id) # also store the channel ID
55
+
56
+ for i in range(1, 4+1): # 4 + 1 because range() only prints up to n-1
57
+ child_node = tree_df.iloc[row_index]["rec"+str(i)]
58
+
59
+ # break if we hit a cycle
60
+ if(child_node in set().union(*all_trees_by_ID.values())):
61
+ break
62
+ else:
63
+ child_row_index = tree_df.index[tree_df['originId'] == child_node].tolist()
64
+ explore_branches(tree_df, all_trees_by_ID, all_trees_by_channelID, child_row_index[0], step+1) # call recursively to get all the tree levels
65
+
66
+ def read_all_trees(tree_files):
67
+
68
+ all_trees_by_ID = {}
69
+ all_trees_by_channelID = {}
70
+ all_tree_files_df = pd.DataFrame()
71
+
72
+ for tree in tree_files:
73
+ print(tree)
74
+
75
+ # populate the tree
76
+ tree_df = pd.read_csv(tree)
77
+
78
+ explore_branches(tree_df, all_trees_by_ID, all_trees_by_channelID) # recursively parse out video IDs and channel IDs from the trees.
79
+
80
+ # save the dataframe to all_tree_files_df
81
+ if(all_tree_files_df.empty):
82
+ all_tree_files_df = tree_df
83
+ else:
84
+ all_tree_files_df = pd.concat([all_tree_files_df, tree_df], axis=0)
85
+
86
+ return all_trees_by_ID, all_trees_by_channelID
87
+
88
+
89
+ tree_files_wage = glob.glob(os.path.join('../../data/recommendation trees/trees_wage/', '*.csv'))
90
+ tree_files_gun = glob.glob(os.path.join('../../data/recommendation trees/trees_gun/', '*.csv'))
91
+
92
+
93
+ all_trees_by_ID_wage, all_trees_by_channelID_wage = read_all_trees(tree_files_wage)
94
+ all_trees_by_ID_gun, all_trees_by_channelID_gun = read_all_trees(tree_files_gun)
95
+
96
+ # get the thing in parentheses as the topicid
97
+ gun_topicids = [re.search(r'\((.*?)\)', filename).group(1) for filename in tree_files_gun]
98
+ wage_topicids = [re.search(r'\((.*?)\)', filename).group(1) for filename in tree_files_wage]
99
+
100
+ # Do a little filtering to get the set of "real" participants
101
+ issue1 = pd.read_csv("../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w123_clean.csv", dtype = {"urlid": str})
102
+
103
+ print("(STUDY 1) Full length of data:")
104
+ print(len(issue1))
105
+
106
+ issue1 = issue1.dropna(subset=["treatment_arm"])
107
+
108
+ print("number of treatment arm workers in complete data:")
109
+ print(len(issue1))
110
+
111
+ print("number once zero engagement is dropped:")
112
+ issue1 = issue1.dropna(subset=["pro", "anti"])
113
+ print(len(issue1))
114
+
115
+ print("number of unique ID's:")
116
+ print(len(issue1["worker_id"].drop_duplicates()))
117
+ # identify the duplicates from the original issue1
118
+ duplicate_workers_issue1 = issue1[issue1.duplicated(subset=["worker_id"], keep=False)]["worker_id"].drop_duplicates()
119
+ # keep only the first response per unique worker_id
120
+ issue1 = issue1.drop_duplicates(subset=["worker_id"], keep='first')
121
+
122
+
123
+ print("number once NA topicID/urlID are dropped:")
124
+ issue1 = issue1.dropna(subset=["topic_id", "urlid"])[["worker_id", "topic_id", "urlid"]]
125
+ print(len(issue1))
126
+
127
+ # merge in thirds
128
+ thirds_workerid_i1 = pd.read_csv("../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w12_clean.csv")[["thirds", "worker_id"]].drop_duplicates()
129
+ issue1 = pd.merge(thirds_workerid_i1, issue1, on = "worker_id", how = "inner").drop_duplicates()
130
+
131
+ print("number once we merge in thirds from wave 2 (they are not present in the dataframe):")
132
+ print(len(issue1))
133
+
134
+ # take a look at the responses of the repeated workers
135
+ issue1_full = pd.read_csv("../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w123_clean.csv", dtype = {"urlid": str})
136
+
137
+ for worker in duplicate_workers_issue1:
138
+ worker_df = issue1_full[issue1_full["worker_id"]==worker][["gun_index_w3", "treatment_seed", "topic_id", "urlid"]].dropna()
139
+ if worker_df.empty:
140
+ continue
141
+ mismatched_columns = []
142
+ for col in worker_df.columns:
143
+ if not worker_df[col].eq(worker_df[col].iloc[0]).all():
144
+ mismatched_columns.append(col)
145
+ print(f"Worker: {worker}; Cols with mismatches: {mismatched_columns}")
146
+
147
+ issue2 = pd.read_csv("../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv")
148
+
149
+ print("(STUDY 2) Full length of data:")
150
+ print(len(issue2))
151
+
152
+ issue2 = issue2.dropna(subset=["treatment_arm"])
153
+
154
+ print("number of treatment arm workers in complete data:")
155
+ print(len(issue2))
156
+
157
+ print("number once zero engagement is dropped:")
158
+ issue2 = issue2.dropna(subset=["pro", "anti"])
159
+ print(len(issue2))
160
+
161
+ print("number of unique ID's:")
162
+ print(len(issue2["worker_id"].drop_duplicates()))
163
+ # identify the duplicates from the original issue2
164
+ duplicate_workers_issue2 = issue2[issue2.duplicated(subset=["worker_id"], keep=False)]["worker_id"].drop_duplicates()
165
+ # keep only the first response per unique worker_id
166
+ issue2 = issue2.drop_duplicates(subset=["worker_id"], keep='first')
167
+
168
+ print("number once NA topicID/urlID are dropped:")
169
+ issue2 = issue2.dropna(subset=["topic_id", "urlid"])[["worker_id", "topic_id", "urlid", "thirds"]]
170
+ print(len(issue2))
171
+
172
+ # take a look at the responses of the repeated workers
173
+ issue2_full = pd.read_csv("../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv")
174
+
175
+ for worker in duplicate_workers_issue2:
176
+ worker_df = issue2_full[issue2_full["worker_id"]==worker][["mw_support_w2", "treatment_seed", "topic_id", "urlid"]].dropna()
177
+ if worker_df.empty:
178
+ continue
179
+ mismatched_columns = []
180
+ for col in worker_df.columns:
181
+ if not worker_df[col].eq(worker_df[col].iloc[0]).all():
182
+ mismatched_columns.append(col)
183
+ print(f"Worker: {worker}; Cols with mismatches: {mismatched_columns}")
184
+
185
+ yougov_topicids = pd.read_csv("../results/intermediate data/minimum wage (issue 2)/yg_w12_clean.csv")
186
+
187
+ print("(STUDY 3) Full length of data:")
188
+ print(len(yougov_topicids))
189
+
190
+ yougov_topicids = yougov_topicids.dropna(subset=["treatment_arm"])
191
+
192
+ print("number of treatment arm workers in complete data:")
193
+ print(len(yougov_topicids))
194
+
195
+ print("number once zero engagement is dropped:")
196
+ yougov_topicids = yougov_topicids.dropna(subset=["pro", "anti"])
197
+ print(len(yougov_topicids))
198
+
199
+ print("number of unique ID's:")
200
+ print(len(yougov_topicids["caseid"].drop_duplicates()))
201
+ # identify the duplicates from the original yougov data
202
+ duplicate_workers_yougov = yougov_topicids[yougov_topicids.duplicated(subset=["caseid"], keep=False)]["caseid"].drop_duplicates()
203
+ # keep only the first response per unique caseid
204
+ yougov_topicids = yougov_topicids.drop_duplicates(subset=["caseid"], keep='first')
205
+
206
+ print("number once NA topicID/urlID are dropped:")
207
+ yougov_topicids = yougov_topicids.dropna(subset=["topic_id", "urlid"])[["caseid", "topic_id", "urlid", "thirds"]]
208
+ print(len(yougov_topicids))
209
+
210
+ # Get the topicIds assigned for people who were partisan (aka, filter out moderates)
211
+
212
+ # these are the real people!
213
+
214
+ # rename "worker_id" and "caseid" to "id"
215
+ issue1 = issue1.rename(columns={"worker_id":"id"})
216
+ issue2 = issue2.rename(columns={"worker_id":"id"})
217
+ yougov_topicids = yougov_topicids.rename(columns={"caseid":"id"})
218
+
219
+ # Decision (10/30): Drop Issue 1 entirely, because some people were recommended videos that were not pro/anti, and we don't have access to the rec set
220
+ ALL_PARTICIPANTS = pd.concat([issue2[["id", "thirds", "urlid", "topic_id"]], yougov_topicids[["id", "thirds", "urlid", "topic_id"]]], axis = 0)
221
+
222
+ len(ALL_PARTICIPANTS)
223
+
224
+
225
+ # Parse out the condition (pro or anti) and the distribution (3-1 or 2-2) from the names and save them to the JSONs
226
+
227
+ # Function to parse out the condition (pro or anti) and the distribution (3-1 or 2-2) from the names
228
+ def parse_condition(topicId):
229
+ topic_components = topicId.split("_")[-4:]
230
+ distr = topic_components[1]
231
+ political_leaning = 'anti' if 'a' in topic_components[-1] else 'pro'
232
+
233
+ try:
234
+ return (int(distr), political_leaning)
235
+ except ValueError:
236
+ print("unable to extract distribution for: " + str(topicId))
237
+ # these are cases in which we can't extract a distribution; e.g., aTPMXi4EaKE_june2021_1_p
238
+ # exclude them from analysis for now by returning None
239
+ return (None, political_leaning)
240
+
241
+
242
+ # Function to turn a video ID into a political leaning
243
+
244
+ # this function converts from the longer video ID's (which have added numbers) to the "raw" format used in the gold labels
245
+ def convert_vid_to_political_leaning(vidId):
246
+ # Keep up to the last non-numeric character and one trailing digit
247
+ vidId_mod = re.match(r'(.*\D\d)\d*$', vidId)
248
+ if vidId_mod:
249
+ vidId = vidId_mod.group(1)
250
+
251
+ matches = gold_labels[gold_labels["originId"] == vidId]["originCat"]
252
+
253
+ if not matches.empty:
254
+ return ', '.join(matches.astype(str))
255
+ else:
256
+ # If matches are empty, strip the last digit and try again
257
+ vidId_mod = re.match(r'(.*\D)\d$', vidId)
258
+ if vidId_mod:
259
+ vidId = vidId_mod.group(1)
260
+ matches = gold_labels[gold_labels["originId"] == vidId]["originCat"]
261
+ if not matches.empty:
262
+ return ', '.join(matches.astype(str))
263
+
264
+ return None
265
+
266
+
267
+ # Filter the JSON blob to the LONGEST blob for each person
268
+
269
+ topic_urlid_to_blob_map = defaultdict(list)
270
+ for data_obj in json_data:
271
+ key = (str(data_obj["topicID"]), str(data_obj["urlid"]))
272
+ topic_urlid_to_blob_map[key].append(data_obj)
273
+
274
+ # get the longest data_obj per key
275
+ topic_urlid_to_max_blob_map = defaultdict(dict)
276
+ for key in topic_urlid_to_blob_map.keys():
277
+ blobs_list = []
278
+ vid_watch_times = []
279
+ for blob in topic_urlid_to_blob_map[key]:
280
+ try:
281
+ vid_watch_time = np.sum(list(blob['vidWatchTimes'].values()))
282
+ except KeyError:
283
+ vid_watch_time = 0
284
+
285
+ blobs_list.append(blob)
286
+ vid_watch_times.append(vid_watch_time)
287
+
288
+ max_blob = blobs_list[np.argmax(vid_watch_times)]
289
+ topic_urlid_to_max_blob_map[key] = max_blob
290
+
291
+
292
+ # This is our main function for reading in the recommendations & what people chose
293
+
294
+ def get_results_distribution(participant_url_identifiers, complete_only = False):
295
+
296
+ print("Number of participant identifiers:")
297
+ print(len(participant_url_identifiers))
298
+
299
+ # create a set that appends the urlid to the topicId
300
+ urlid_topicid_set = {f"{row['urlid']}_{row['topic_id']}" for _, row in participant_url_identifiers.iterrows()}
301
+
302
+ # here, we're looking only at the MAX time blob for each person
303
+ orig_experiment_json = [obj for obj in topic_urlid_to_max_blob_map.values() if (obj['urlid'] + '_' + obj['topicID'] in urlid_topicid_set)]
304
+
305
+ # filter to only COMPLETE sessions
306
+ if(complete_only == True):
307
+ orig_experiment_json = [obj for obj in orig_experiment_json if obj["sessionFinished"] == True]
308
+
309
+ print("Number of participant JSON objects:")
310
+ print(len(orig_experiment_json))
311
+
312
+ # unpack information about the distribution and condition from the topicId
313
+ for obj in orig_experiment_json:
314
+ obj['distribution'], obj['political_leaning'] = parse_condition(obj["topicID"])
315
+
316
+ RESULTS_DICT = { # this is our main results dictionary
317
+ 'pro': {22:[], 31: []},
318
+ 'anti': {22:[], 31: []}
319
+ }
320
+
321
+ failures = { # log failures or reasons why we couldn't process all the data
322
+ "no_distribution": 0,
323
+ "videos_missing": 0,
324
+ "recs_incomplete": 0,
325
+ "activity_incomplete": 0,
326
+ "video_non_pro_anti": 0,
327
+ "reversed_politics_for_study1": 0}
328
+
329
+ NOBS_counted = 0
330
+
331
+ url_topic_id_processed = set()
332
+
333
+ for obj in orig_experiment_json:
334
+
335
+ participant_id = obj['urlid']
336
+
337
+ processed = False # Keep track of whether we processed this participant
338
+ if(obj['distribution']) is None or obj['distribution'] not in {22, 31}:
339
+ failures["no_distribution"] += 1
340
+ continue # We don't know the distribution; we can't analyze this
341
+
342
+ try:
343
+ recommendations = obj['displayOrders']
344
+ except KeyError:
345
+ failures["activity_incomplete"] += 1
346
+
347
+ if not processed: # Only process recommendations if not already processed
348
+ try:
349
+ recs_keys = ['2-recs', '3-recs', '4-recs', '5-recs']
350
+ rec_info = {}
351
+ for i, key in enumerate(recs_keys):
352
+ rec_list = recommendations[key]
353
+ rec_list_leanings = [convert_vid_to_political_leaning(vid) for vid in rec_list]
354
+ rec_info[i+1] = { # We start recs at level 2
355
+ "videos": rec_list,
356
+ "political_leaning": rec_list_leanings
357
+ }
358
+ except KeyError as e:
359
+ failures["recs_incomplete"] += 1 # Save what we have and move on to the next participant
360
+
361
+ watched_leanings = []
362
+ try:
363
+ p_watchlist = list(obj['vids'])
364
+ for rec_level, video in enumerate(p_watchlist):
365
+ if (rec_level == 0): continue # Start recs at level 2
366
+ if(video in rec_info[rec_level]["videos"]):
367
+ political_leaning = convert_vid_to_political_leaning(video)
368
+ if(political_leaning not in ['pro', 'anti']):
369
+ continue
370
+ watched_leanings.append(political_leaning)
371
+ except KeyError as e:
372
+ failures["activity_incomplete"] += 1
373
+
374
+ RESULTS_DICT[obj['political_leaning']][obj['distribution']].append({participant_id: watched_leanings})
375
+ processed = True # Mark this participant as processed
376
+
377
+ # Counts the number of participants for which we were able to get valid data
378
+ if processed and (obj['topicID'], obj['urlid']) not in url_topic_id_processed:
379
+ NOBS_counted += 1
380
+ url_topic_id_processed.add((obj['topicID'], obj['urlid']))
381
+
382
+ return RESULTS_DICT, NOBS_counted, failures, url_topic_id_processed
383
+
384
+
385
+ # This is the version that includes PARTIAL data
386
+
387
+ # run this for the liberals
388
+ print("Liberals:")
389
+ libs = ALL_PARTICIPANTS[ALL_PARTICIPANTS["thirds"] == 1]
390
+ results_lib, nobs_lib, failures_lib, url_topic_id_processed_lib = get_results_distribution(libs)
391
+
392
+ # run this for the conservatives
393
+ print("Conservatives:")
394
+ cons = ALL_PARTICIPANTS[ALL_PARTICIPANTS["thirds"] == 3]
395
+ results_cons, nobs_cons, failures_cons, url_topic_id_processed_cons = get_results_distribution(cons)
396
+
397
+ # (and just for kicks) run this for the moderates
398
+ print("Moderates:")
399
+ mods = ALL_PARTICIPANTS[ALL_PARTICIPANTS["thirds"] == 2]
400
+ results_mods, nobs_mods, failures_mods, url_topic_id_processed_mods = get_results_distribution(mods)
401
+
402
+ def turn_results_dict_to_dataframe(data):
403
+ rows = []
404
+
405
+ for top_level_key in data:
406
+ for second_level_key in data[top_level_key]:
407
+ for participant_dict in data[top_level_key][second_level_key]:
408
+ for participant_id, choices in participant_dict.items():
409
+ row = {
410
+ 'pro': 1 if top_level_key == 'pro' else 0,
411
+ 'anti': 1 if top_level_key == 'anti' else 0,
412
+ '22': 1 if second_level_key == 22 else 0,
413
+ '31': 1 if second_level_key == 31 else 0,
414
+ 'participantID': participant_id,
415
+ 'choice_1': choices[0] if len(choices) > 0 else None,
416
+ 'choice_2': choices[1] if len(choices) > 1 else None,
417
+ 'choice_3': choices[2] if len(choices) > 2 else None,
418
+ 'choice_4': choices[3] if len(choices) > 3 else None,
419
+ }
420
+ rows.append(row)
421
+
422
+ return pd.DataFrame(rows)
423
+
424
+ df_results_lib = turn_results_dict_to_dataframe(results_lib)
425
+ df_results_cons = turn_results_dict_to_dataframe(results_cons)
426
+
427
+ # EXPERIMENT: drop anyone who didn't finish
428
+ df_results_lib_complete_only = df_results_lib.dropna(subset=["choice_1", "choice_2", "choice_3", "choice_4"])
429
+ df_results_cons_complete_only = df_results_cons.dropna(subset=["choice_1", "choice_2", "choice_3", "choice_4"])
430
+
431
+ # first, pivot the df to long
432
+ df_libs_long = pd.melt(
433
+ df_results_lib,
434
+ id_vars=['pro', 'anti', '22', '31', 'participantID'],
435
+ value_vars=['choice_1', 'choice_2', 'choice_3', 'choice_4'],
436
+ var_name='choice_number',
437
+ value_name='choice'
438
+ ).dropna(subset=["choice"])
439
+
440
+ df_cons_long = pd.melt(
441
+ df_results_cons,
442
+ id_vars=['pro', 'anti', '22', '31', 'participantID'],
443
+ value_vars=['choice_1', 'choice_2', 'choice_3', 'choice_4'],
444
+ var_name='choice_number',
445
+ value_name='choice'
446
+ ).dropna(subset=["choice"])
447
+
448
+ # also pivot the complete only dfs
449
+ df_libs_long_complete_only = pd.melt(
450
+ df_results_lib_complete_only,
451
+ id_vars=['pro', 'anti', '22', '31', 'participantID'],
452
+ value_vars=['choice_1', 'choice_2', 'choice_3', 'choice_4'],
453
+ var_name='choice_number',
454
+ value_name='choice'
455
+ ).dropna(subset=["choice"])
456
+
457
+ df_cons_long_complete_only = pd.melt(
458
+ df_results_cons_complete_only,
459
+ id_vars=['pro', 'anti', '22', '31', 'participantID'],
460
+ value_vars=['choice_1', 'choice_2', 'choice_3', 'choice_4'],
461
+ var_name='choice_number',
462
+ value_name='choice'
463
+ ).dropna(subset=["choice"])
464
+
465
+ # for lib, set 'pro' to 1 and 'anti' to 0 in choice_1, choice_2, choice_3, and choice_4
466
+ df_libs_long[["choice"]] = df_libs_long[["choice"]].applymap(lambda x: 1 if x == 'pro' else 0)
467
+ df_libs_long_complete_only[["choice"]] = df_libs_long_complete_only[["choice"]].applymap(lambda x: 1 if x == 'pro' else 0)
468
+
469
+ # for cons, set 'pro' to 0 and 'anti' to 1 in choice_1, choice_2, choice_3, and choice_4
470
+ df_cons_long[["choice"]] = df_cons_long[["choice"]].applymap(lambda x: 1 if x == 'anti' else 0)
471
+ df_cons_long_complete_only[["choice"]] = df_cons_long_complete_only[["choice"]].applymap(lambda x: 1 if x == 'anti' else 0)
472
+
473
+ df_cons_long_22 = df_cons_long[df_cons_long["22"] == 1]
474
+ df_cons_long_31 = df_cons_long[df_cons_long["31"] == 1]
475
+
476
+ # complete_only
477
+ df_cons_long_complete_only_22 = df_cons_long_complete_only[df_cons_long_complete_only["22"] == 1]
478
+ df_cons_long_complete_only_31 = df_cons_long_complete_only[df_cons_long_complete_only["31"] == 1]
479
+
480
+ df_libs_long_22 = df_libs_long[df_libs_long["22"] == 1]
481
+ df_libs_long_31 = df_libs_long[df_libs_long["31"] == 1]
482
+
483
+ # complete_only
484
+ df_libs_long_complete_only_22 = df_libs_long_complete_only[df_libs_long_complete_only["22"] == 1]
485
+ df_libs_long_complete_only_31 = df_libs_long_complete_only[df_libs_long_complete_only["31"] == 1]
486
+
487
+
488
+ # Linear Regression clustering by participant
489
+
490
+ # run the regression for conservatives in 22
491
+ y_cons22 = df_cons_long_22["choice"].astype(float)
492
+ X_cons22 = sm.add_constant(pd.Series(1, index=y_cons22.index))
493
+ model_cons22 = sm.OLS(y_cons22, X_cons22)
494
def _fit_intercept_only_ols(df_long):
    """Fit an intercept-only OLS of `choice` with SEs clustered by participantID.

    This is the pattern repeated for every subgroup below:
    y = choice (as float); X = a single constant column (add_constant on an
    all-ones Series keeps just one column of 1s), so the intercept estimates
    the mean choice rate, with cluster-robust SEs by participant.
    """
    y = df_long["choice"].astype(float)
    X = sm.add_constant(pd.Series(1, index=y.index))
    return sm.OLS(y, X).fit(
        cov_type='cluster', cov_kwds={'groups': df_long['participantID']}
    )

# run the regression for conservatives in 22
# NOTE(review): assumes model_cons22 (built above, outside this view) followed
# the same intercept-only pattern as every other cell — confirm.
results_cons22 = _fit_intercept_only_ols(df_cons_long_22)
print(results_cons22.summary())

# run the regression for conservatives in 31
results_cons31 = _fit_intercept_only_ols(df_cons_long_31)
print(results_cons31.summary())

# run the regression for liberals in 22
results_libs22 = _fit_intercept_only_ols(df_libs_long_22)
print(results_libs22.summary())

# run the regression for liberals in 31
results_libs31 = _fit_intercept_only_ols(df_libs_long_31)
print(results_libs31.summary())

# now run the regressions for the complete only dfs
# run the regression for conservatives in 22
results_cons22_complete_only = _fit_intercept_only_ols(df_cons_long_complete_only_22)
print(results_cons22_complete_only.summary())

# run the regression for conservatives in 31
results_cons31_complete_only = _fit_intercept_only_ols(df_cons_long_complete_only_31)
print(results_cons31_complete_only.summary())

# Notebook-style sanity displays on the liberal complete-only data.
# (.mean() on the resulting numpy scalar is a no-op; kept from the notebook.)
df_libs_long_complete_only_22["choice"].dropna().mean().mean()

df_libs_long_complete_only_22["choice"].describe()

# complete only dfs
# run the regression for liberals in 22
results_libs22_complete_only = _fit_intercept_only_ols(df_libs_long_complete_only_22)
print(results_libs22_complete_only.summary())

# run the regression for liberals in 31
results_libs31_complete_only = _fit_intercept_only_ols(df_libs_long_complete_only_31)
print(results_libs31_complete_only.summary())
552
# ### Get stats for how many observations of each seed we processed

# Including INCOMPLETE data

# how many observations did we get valid data from?
# nobs_* counters are accumulated earlier in the notebook — TODO confirm.
print("Libs, Cons, Mods")
print(nobs_lib, nobs_cons, nobs_mods)
print("total:")
print(nobs_lib + nobs_cons + nobs_mods)

# ### Accounting by Topic ID + URL ID

# of those observations, how many UNIQUE topicID + urlids did we get?
### INCOMPLETE data included
print("Libs, Cons, Mods")
print(len(url_topic_id_processed_lib), len(url_topic_id_processed_cons), len(url_topic_id_processed_mods))
print("total:")
print(len(url_topic_id_processed_lib) + len(url_topic_id_processed_cons) +len(url_topic_id_processed_mods))


# ### Look at Failures
# Bare tuple expression: displays the failure records in a notebook cell.
failures_lib, failures_cons, failures_mods
576
# ### Summary Statistics
def flatten_nested_dict(input_dict):
    """Collapse {seed: {condition: [{participant: choices}, ...]}} into flat lists.

    Returns {'pro': {22: [...], 31: [...]}, 'anti': {22: [...], 31: [...]}},
    where each inner list is every participant's choices concatenated in
    encounter order. Seeds outside 'pro'/'anti' raise KeyError, as before.
    """
    flattened = {seed: {22: [], 31: []} for seed in ('pro', 'anti')}
    for seed, by_condition in input_dict.items():
        for condition, participant_dicts in by_condition.items():
            bucket = flattened[seed][condition]
            for per_participant in participant_dicts:
                for choice_list in per_participant.values():
                    bucket.extend(choice_list)
    return flattened
590
+
591
# Flatten the nested per-participant results into per-seed/per-condition lists.
results_lib_flat = flatten_nested_dict(results_lib)
results_cons_flat = flatten_nested_dict(results_cons)

# Notebook-style displays: sizes of the conservative 'anti' buckets.
len(results_cons_flat['anti'][22])
len(results_cons_flat['anti'][31])
596
+
597
def print_summary_stats_for_results(RESULTS_DICT):
    """Print, per seed ('pro'/'anti') and per condition key, the share of
    selected labels that match the seed's partisanship.

    RESULTS_DICT: {seed: {condition_key: [label, ...]}} as produced by
    flatten_nested_dict. Prints only; returns None.
    """
    for seed, per_condition in RESULTS_DICT.items():
        print(f"Summary statistics for '{seed}' seed:")
        for condition, labels in per_condition.items():
            n_labels = len(labels)
            if n_labels == 0:
                print(f" List {condition}: No labels to evaluate")
            else:
                # Share of labels agreeing with the parent seed category.
                agree_share = (sum(label == seed for label in labels) / n_labels) * 100
                print(f" [{condition}]: {agree_share:.2f}% selected videos with same partisanship as seed")
613
+
614
+ print("Liberals---------------------------")
615
+ print_summary_stats_for_results(results_lib_flat)
616
+ print("Conservatives----------------------")
617
+ print_summary_stats_for_results(results_cons_flat)
618
+
619
+
620
# ## Simulation of Random Guessing
#
# This creates the baseline for which we compare everything

def simulate_random_watching(N_ITER = 1000000, video_set = [1, 0, 0, 0]): # 1 is pro and 0 is anti
    """Simulate N_ITER sessions of 4 uniformly-random video choices.

    Each session starts from `video_set` (four slots; 1 = pro, 0 = anti).
    After watching a pro video the next slate is [1, 1, 1, 0]; after an anti
    video it is [0, 0, 0, 1]. Returns a list of N_ITER lists of 4 labels.
    (`video_set` is only rebound, never mutated, so the mutable default is safe.)
    """
    current_slate = video_set
    all_watched_videos = []

    for _ in range(N_ITER):
        watched = []
        for _ in range(4):  # four choices per session
            # Draw a slot uniformly from the current slate.
            picked = current_slate[np.random.choice(4, 1)[0]]
            # Update the slate offered for the next choice.
            current_slate = [1, 1, 1, 0] if picked == 1 else [0, 0, 0, 1]
            watched.append(picked)
        all_watched_videos.append(watched)
        # Reset the slate for the next session.
        current_slate = video_set

    return all_watched_videos
651
+
652
# Baseline probabilities from the random-guessing simulation.
simulated_31_anti = simulate_random_watching(video_set=[1, 0, 0, 0])
simulated_31_anti_flat = [item for sublist in simulated_31_anti for item in sublist]
# FIX: mean(flat) is the PRO share, so 1 - mean is the ANTI share — that is
# the value printed and stored in p_31_anti, but the old label said "pro".
print("Probability of selecting an anti video (given 3-1 distribution):")
print(1-np.mean(simulated_31_anti_flat))
p_31_anti = 1-np.mean(simulated_31_anti_flat)

simulated_31_pro = simulate_random_watching(video_set=[0, 1, 1, 1])
simulated_31_pro_flat = [item for sublist in simulated_31_pro for item in sublist]
print("Probability of selecting a pro video (given 3-1 distribution):")
print(np.mean(simulated_31_pro_flat))
p_31_pro = np.mean(simulated_31_pro_flat)

simulated_22 = simulate_random_watching(video_set=[1, 1, 0, 0])
simulated_22_flat = [item for sublist in simulated_22 for item in sublist]
print("Probability of selecting a pro video (given 2-2 distribution):")
print(np.mean(simulated_22_flat))
p_22_pro = np.mean(simulated_22_flat)

print("Probability of selecting an anti video (given 2-2 distribution):")
print(1-np.mean(simulated_22_flat))
p_22_anti = 1-np.mean(simulated_22_flat)
675
# ### Statistical Tests

# - whether conservative respondents, given a current conservative video, clicked a conservative recommendation at >.75 rate in the 3/1, or .5 in the 2/2
# - whether conservative respondents, given a current liberal video, clicked a conservative recommendation at >.25 rate in the 3/1, or .5 in the 2/2
# - whether liberal respondents, given a current liberal video, clicked a liberal recommendation at >.75 rate in the 3/1, or .5 in the 2/2
# - whether liberal respondents, given a current conservative video, clicked a liberal recommendation at >.25 rate in the 3/1, or .5 in the 2/2

# Simulated random-guessing baselines, keyed by seed partisanship and
# condition. NOTE: condition keys are STRINGS ("22"/"31") here, while the
# flattened results dicts use int keys (22/31); run_stat_tests maps between them.
simulated_baselines = {
    "pro": {"22": p_22_pro, "31": p_31_pro},
    "anti": {"22": p_22_anti, "31": p_31_anti}
}
686
+
687
# Function to calculate the proportion of 'pro' or 'anti' matches in the list
def calculate_proportion_matches(results_list, target_key):
    """Return a 0/1 indicator list: 1 where an entry equals target_key.

    Despite the name, this returns per-observation indicators (not a single
    proportion); callers take np.mean of the result.
    """
    return [int(value == target_key) for value in results_list]
691
+
692
# Collect the proportions and run t-tests for both keys (22 and 31)
def run_stat_tests(RESULTS_DICT):
    """For each seed ('pro'/'anti'), one-sample t-test the observed match
    rate in the 2-2 and 3-1 conditions against the simulated baseline.

    RESULTS_DICT uses int condition keys (22/31); simulated_baselines uses
    string keys ("22"/"31"). Prints only; returns None.
    """
    # (int key in the data, string key into baselines, t-stat rounding digits)
    conditions = [(22, "22", 5), (31, "31", 10)]
    for parent_key, data in RESULTS_DICT.items():
        print(f"Testing for parent key: {parent_key}")
        for cond, baseline_key, t_digits in conditions:
            observations = data[cond]
            if len(observations) == 0:
                continue
            indicators = calculate_proportion_matches(observations, parent_key)
            baseline = simulated_baselines[parent_key][baseline_key]
            t_stat, p_val = stats.ttest_1samp(a=indicators, popmean=baseline)
            print(f"t-test for {cond} key (test {np.mean(indicators)} against {baseline})")
            print(f"t-statistic: {round(t_stat, t_digits)}")
            print(f"p-value: {round(p_val, 5)}")
712
+
713
# Run the baseline-comparison t-tests for both groups.
print("Liberals---------------------------")
run_stat_tests(results_lib_flat)
print("Conservatives----------------------")
run_stat_tests(results_cons_flat)


# # Figuring out data anomalies

# First, there are some overlapping participants (326) between Study 1 and Study 2

# Count of participant ids appearing in both study rosters (notebook display).
len(set(issue1["id"]).intersection(set(issue2["id"])))


# These are the topicID's for everyone we expect to have data for

ALL_PARTICIPANTS

# Search for the participant in the JSON data and figure out whether we have at least one session for the participant in which they actually viewed data

# these are the number of Topic/URL Id's among our participants
# (stringified so they compare cleanly against the JSON-derived keys below)
unique_participant_topic_urlid = set([(str(row["topic_id"]), str(row["urlid"])) for _, row in ALL_PARTICIPANTS.iterrows()])
len(unique_participant_topic_urlid)

# these are the number of Topic/URL Id's within the JSON data
unique_json_topic_urlid = set([(str(obj["topicID"]), str(obj["urlid"])) for obj in json_data])
len(unique_json_topic_urlid)

# Actually, every participant is in the data somewhere!
len(unique_json_topic_urlid.intersection(unique_participant_topic_urlid))
742
+
743
+
744
# Figure out what each of the participants did on the platform
# NOTE(review): despite the "participant_" prefix, these counters are keyed
# by (topicID, urlid) pairs, not participant ids.
participant_session_counts = defaultdict(int)
participant_vids_present_session_counts = defaultdict(int)
participant_complete_session_counts = defaultdict(int)

participants_set = set()

# Pre-index the JSON sessions by (topicID, urlid) so the participant loop
# below is a dict lookup instead of a scan over json_data.
topic_urlid_data_map = defaultdict(list)

for data_obj in json_data:
    key = (str(data_obj["topicID"]), str(data_obj["urlid"]))
    topic_urlid_data_map[key].append({
        "has_vids": 'vids' in data_obj.keys(),
        "sessionFinished": data_obj.get("sessionFinished", False)
    })

for _, participant in tqdm(ALL_PARTICIPANTS.iterrows(), total=len(ALL_PARTICIPANTS), desc="Processing participants"):

    topicID = str(participant["topic_id"])
    urlid = str(participant["urlid"])
    participant_id = str(participant["id"])

    # Use the (topicID, urlid) pair as the key
    key = (topicID, urlid)

    # Get the relevant data objects from the pre-built map
    if key in topic_urlid_data_map:

        participants_set.add(participant_id)

        for data_obj in topic_urlid_data_map[key]:
            # Track if the participant has any appearances in the data at all
            participant_session_counts[key] += 1

            # Track how many times the participant saw valid videos
            if data_obj["has_vids"]:
                participant_vids_present_session_counts[key] += 1

            # Track how many times they have complete data
            if data_obj["sessionFinished"]:
                participant_complete_session_counts[key] += 1


# Who actually completed their sessions? (notebook displays)
len(participant_session_counts) # everyone is in the data somewhere

len(participant_vids_present_session_counts) # everyone has some kind of video interaction

len(participant_complete_session_counts) # only 5,573 people fully completed the study

len(ALL_PARTICIPANTS)
795
+
796
+
797
# But why is the number of unique participant ID's different?
#
# **It turns out that it's because of the overlap in participants between Study 1 and Study 2!!!**

# assert(len(ALL_PARTICIPANTS) - len(participants_set) == len(set(issue1["id"]).intersection(set(issue2["id"]))))


# If we look only at "complete" sessions: the same participant has up to 3 complete sessions
#
# If we include incomplete sessions: the same participant can have up to 14 partial sessions (!!!)

# Distribution of session counts per (topicID, urlid) key (notebook displays).
pd.Series(participant_complete_session_counts.values()).value_counts()

pd.Series(participant_vids_present_session_counts.values()).value_counts()

pd.Series(participant_session_counts.values()).value_counts()

# Who are the participants who didn't complete the study, and what were their other survey DV's like?

# (topicID, urlid) pairs with no completed session at all.
no_session = unique_participant_topic_urlid.difference(set(participant_complete_session_counts.keys()))

len(no_session)


# 272 were in Study 1
study1_topic_urlid = set([(str(row["topic_id"]), str(row["urlid"])) for _, row in issue1.iterrows()])
len(study1_topic_urlid.intersection(no_session))

# 196 were in Study 2
study2_topic_urlid = set([(str(row["topic_id"]), str(row["urlid"])) for _, row in issue2.iterrows()])
len(study2_topic_urlid.intersection(no_session))


# 3 were in Study 3
yougov_topic_urlid = set([(str(row["topic_id"]), str(row["urlid"])) for _, row in yougov_topicids.iterrows()])
len(yougov_topic_urlid.intersection(no_session))

# Full survey exports, used to look up the non-completers' other DVs.
issue1_full = pd.read_csv("../results/intermediate data/gun control (issue 1)/guncontrol_qualtrics_w123_clean.csv")
issue2_full = pd.read_csv("../results/intermediate data/minimum wage (issue 2)/qualtrics_w12_clean.csv")
yougov_full = pd.read_csv("../results/intermediate data/minimum wage (issue 2)/yg_w12_clean.csv")

# Survey DV columns to compare across studies (not every column exists in
# every study's export; collect_values_from_study skips missing ones).
columns_to_collect = ["duration", "total_interactions", "gun_index_w2", "gun_index_2", "gun_index_w3", "stricter_laws_w3",
                      "right_to_own_importance_w3", "assault_ban_w3", "handgun_ban_w3", "concealed_safe_w3",
                      "gun_index_2_w3", "mw_index_w2", "trust_youtube_w2", "media_trust_w2", "media_trust_w3", "affpol_smart",
                      "smart_dems_w2", "smart_reps_w2"]

collected_values = {col: [] for col in columns_to_collect}
num_redundancies_by_study = {"Study_1": 0, "Study_2": 0, "Study_3": 0}
845
+
846
def collect_values_from_study(df, id_column, participant_ids, collected_values, columns=None):
    """Append the non-null values of selected columns for matching participants.

    Args:
        df: survey dataframe to pull values from.
        id_column: column in `df` holding participant identifiers.
        participant_ids: iterable of ids to keep.
        collected_values: dict of column name -> list, extended IN PLACE.
        columns: which columns to collect; defaults to the module-level
            `columns_to_collect` (backward compatible with the old behavior,
            which read that global implicitly).

    Columns absent from `df` are silently skipped, as before.
    """
    cols = columns_to_collect if columns is None else columns
    filtered_df = df[df[id_column].isin(participant_ids)]
    for col in cols:
        if col in filtered_df.columns:
            collected_values[col].extend(filtered_df[col].dropna().tolist())
851
+
852
# For every never-completed (topic_id, urlid) pair, find which study it came
# from, tally it, and collect the participant's other survey DVs.
for (topic_id, urlid) in no_session:
    # Find matches in the datasets
    # NOTE(review): no_session holds STRINGIFIED ids; if these columns are
    # numeric, the equality filters silently match nothing — confirm dtypes
    # of topic_id/urlid in issue1/issue2/yougov_topicids.
    matches_i1 = issue1[(issue1['topic_id'] == topic_id) & (issue1['urlid'] == urlid)]
    matches_i2 = issue2[(issue2['topic_id'] == topic_id) & (issue2['urlid'] == urlid)]
    matches_yg = yougov_topicids[(yougov_topicids['topic_id'] == topic_id) & (yougov_topicids['urlid'] == urlid)]

    if not matches_i1.empty:
        num_redundancies_by_study["Study_1"] += 1
        collect_values_from_study(issue1_full, "worker_id", matches_i1["id"], collected_values)

    if not matches_i2.empty:
        num_redundancies_by_study["Study_2"] += 1
        collect_values_from_study(issue2_full, "worker_id", matches_i2["id"], collected_values)

    if not matches_yg.empty:
        num_redundancies_by_study["Study_3"] += 1
        collect_values_from_study(yougov_full, "caseid", matches_yg["id"], collected_values)

# Filter columns with non-empty collected values
non_empty_cols = {col: values for col, values in collected_values.items() if values}

# Set up the plot grid based on the number of columns with data
num_plots = len(non_empty_cols)
n_cols = 3 # Maximum number of columns per row
n_rows = math.ceil(num_plots / n_cols) # Dynamically calculate number of rows

# Set up the figure size dynamically based on number of plots
plt.figure(figsize=(5 * n_cols, 4 * n_rows))

# Plot histograms for non-empty columns
for i, (col, values) in enumerate(non_empty_cols.items(), 1):
    plt.subplot(n_rows, n_cols, i)
    plt.hist(values, bins=20, color='blue', edgecolor='black')
    plt.title(col)
    plt.xlabel(col)
    plt.ylabel('Frequency')

plt.tight_layout()
plt.show()

# How many non-completers matched each study (notebook display).
num_redundancies_by_study
893
+
environment/Dockerfile ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# hash:sha256:44adf2d24cf24272656bd8a2cea0b2fc9a3de031215a3677db81b126432df80f
# Code Ocean capsule environment: Python 3.10 + R 4.3.2 base image with the
# pinned Python and R packages the analysis code depends on.
FROM registry.codeocean.com/codeocean/py-r:python3.10.12-R4.3.2-JupyterLab4.0.10-RStudiorstudio-server-2023.12.0-369-ubuntu22.04

ARG DEBIAN_FRONTEND=noninteractive

# Pinned Python dependencies (seaborn intentionally unpinned).
RUN pip install -U --no-cache-dir \
    matplotlib==3.10.0 \
    numpy==1.26.4 \
    pandas==2.2.3 \
    rpy2==3.5.17 \
    seaborn \
    stargazer==0.0.7

# Pinned R dependencies installed via remotes::install_version.
RUN Rscript -e 'remotes::install_version("car", "3.1-3")' \
    && Rscript -e 'remotes::install_version("corrplot", "0.95")' \
    && Rscript -e 'remotes::install_version("covr", "3.6.4")' \
    && Rscript -e 'remotes::install_version("doParallel", "1.0.17")' \
    && Rscript -e 'remotes::install_version("fastDummies", "1.7.5")' \
    && Rscript -e 'remotes::install_version("feather", "0.3.5")' \
    && Rscript -e 'remotes::install_version("ggtext", "0.1.2")' \
    && Rscript -e 'remotes::install_version("janitor", "2.2.1")' \
    && Rscript -e 'remotes::install_version("lubridate", "1.9.4")' \
    && Rscript -e 'remotes::install_version("mockr", "0.2.1")' \
    && Rscript -e 'remotes::install_version("psych", "2.4.12")' \
    && Rscript -e 'remotes::install_version("randomizr", "1.0.0")' \
    && Rscript -e 'remotes::install_version("sandwich", "3.1-1")' \
    && Rscript -e 'remotes::install_version("stargazer", "5.2.3")' \
    && Rscript -e 'remotes::install_version("systemfonts", "1.2.1")' \
    && Rscript -e 'remotes::install_version("textshaping", "1.0.0")' \
    && Rscript -e 'remotes::install_version("tidyverse", "2.0.0")'

# Install code-server (VS Code in the browser) for interactive sessions.
ADD "https://github.com/coder/code-server/releases/download/v4.95.3/code-server-4.95.3-linux-amd64.tar.gz" /.code-server/code-server.tar.gz

RUN cd /.code-server \
    && tar -xvf code-server.tar.gz \
    && rm code-server.tar.gz \
    && ln -s /.code-server/code-server-4.95.3-linux-amd64/bin/code-server /usr/bin/code-server

# Pre-install editor extensions for R, Python, and Jupyter workflows.
RUN mkdir -p /.vscode/extensions \
    && code-server --extensions-dir="/.vscode/extensions" --install-extension REditorSupport.R \
    && code-server --extensions-dir="/.vscode/extensions" --install-extension continue.continue \
    && code-server --extensions-dir="/.vscode/extensions" --install-extension ms-python.python \
    && code-server --extensions-dir="/.vscode/extensions" --install-extension ms-toolsai.jupyter \
    && code-server --extensions-dir="/.vscode/extensions" --install-extension reageyao.bioSyntax \
    && code-server --extensions-dir="/.vscode/extensions" --install-extension saoudrizwan.claude-dev
metadata/metadata.yml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ metadata_version: 1
2
+ name: Short-term exposure to filter-bubble algorithmic recommendations have limited
3
+ effects on polarization
4
+ description: An enormous literature argues that recommendation algorithms drive political
5
+ polarization by creating "filter bubbles" and "rabbit holes." Using four experiments
6
+ with nearly 9,000 participants, we show that manipulating algorithmic recommendations
7
+ to create these conditions has limited effects on opinions. Our experiments employ
8
+ a custom-built video platform with a naturalistic, YouTube-like interface presenting
9
+ real YouTube videos and recommendations. We experimentally manipulate YouTube's
10
+ actual recommendation algorithm to simulate "filter bubbles" and "rabbit holes"
11
+ by presenting ideologically balanced and slanted choices. Our design allows us to
12
+ intervene in a feedback loop that has confounded the study of algorithmic polarization—the
13
+ complex interplay between *supply* of recommendations and user *demand* for content—to
14
+ examine downstream effects on policy attitudes. We use over 130,000 experimentally
15
+ manipulated recommendations and 31,000 platform interactions to estimate how recommendation
16
+ algorithms alter users' media consumption decisions and, indirectly, their political
17
+ attitudes. Our results cast doubt on widely circulating theories of algorithmic
18
+ polarization by showing that even heavy-handed (although short-term) perturbations
19
+ of real-world recommendations have limited causal effects on policy attitudes. Given
20
+ our inability to detect consistent evidence for algorithmic effects, we argue the
21
+ burden of proof for claims about algorithm-induced polarization has shifted. Our
22
+ methodology, which captures and modifies the output of real-world recommendation
23
+ algorithms, offers a path forward for future investigations of black-box artificial
24
+ intelligence systems. Our findings reveal practical limits to effect sizes that
25
+ are feasibly detectable in academic experiments.
26
+ authors:
27
+ - name: Naijia Liu, Xinlan Emily Hu, Yasemin Savas, Matthew Baum, Adam Berinsky, Allison
28
+ Chaney, Christopher Lucas, Rei Mariman, Justin de Benedictis-Kessner, Andrew Guess,
29
+ Dean Knox, and Brandon Stewart
30
+ affiliations:
31
+ - name: Multiple
32
+ corresponding_contributor:
33
+ name: Dean Knox
34
+ email: dcknox@upenn.edu