### Initialize workspace.

## Clear workspace.
# NOTE(review): `rm(list = ls())` and `setwd()` are generally discouraged in
# scripts; retained here because this replication script is designed to run
# top-to-bottom from a fixed directory layout.
rm(list = ls(all = TRUE))

## Confirm working directory.
setwd("~/Downloads/hbg_replication")

## Set seed (fixed so all bootstrap results below are reproducible).
set.seed(123)

## Set number of iterations for bootstrap replication. This constant is now
## used at every bootstrap call site (previously the value 10,000 was
## hard-coded throughout, so changing `n_iter` had no effect).
n_iter <- 10000

## Load relevant packages.
library(sandwich)  # vcovHC(): heteroskedasticity-robust (HC2) covariances.
library(car)       # recode(): used later to bin the ideology scale.

## Load relevant helper functions (provides capwords(), used for table labels).
source("scripts/helper_functions.R")

## Load data.
# Load experimental data.
tpnw <- read.csv("data/tpnw_data.csv", row.names = 1,
                 stringsAsFactors = FALSE)
# Load YouGov data.
aware <- read.csv("data/tpnw_aware.csv", row.names = 1,
                  stringsAsFactors = FALSE)

### Define relevant objects.

## Define objects specifying outcomes.
# Specify join_tpnw object, representing the main outcome (a column name,
# used to build model formulas programmatically).
join_tpnw <- "join_tpnw"
# Specify tpnw_atts object, representing attitudinal outcomes (all columns
# whose names begin with "tpnw_atts").
tpnw_atts <- names(tpnw)[startsWith(names(tpnw), "tpnw_atts")]
# Specify all_outs object, concatenating main and attitudinal outcomes.
all_outs <- c(join_tpnw, tpnw_atts)

## Define objects specifying predictors.
# Define object specifying main treatments.
treats <- c("group_cue", "security_cue", "norms_cue", "institutions_cue")
# Define object specifying general demographics.
demos <- c("age", "female", "midwest", "west", "south", "income", "educ")
# Define object specifying politically relevant demographics.
pol_demos <- c("ideo", "pid3")
# Define list of conditioning sets (NULL corresponds to Model 1, whereas the
# use of demographic and political covariates corresponds to Model 2).
covars <- list(NULL, c(demos, pol_demos))

### Produce analysis.

## Produce balance table.
# Specify covariates to be used for balance table.
bal_covars <- c("age", "female", "northeast", "midwest", "west", "south",
                "income", "educ", "ideo", "pid3")
# Produce balance table matrix output, looping over treatment group.
bal_mat <- lapply(0:4, function (i) {
  # For each treatment value, compute a per-covariate mean and bootstrap SE.
  apply(tpnw[bal_covars][tpnw$treatment == i,], 2, function (x) {
    # Calculate the mean of each covariate (no na.rm here, unlike the
    # outcome summaries below — assumes complete covariate data).
    mean_x <- mean(x)
    # Calculate SE estimates as the SD of n_iter bootstrap means.
    sd_x <- sd(replicate(n_iter, {
      samp <- x[sample(length(x), replace = TRUE)]
      return(mean(samp))
    }))
    # Return a list containing both point estimates.
    return(list(mean = mean_x, sd = sd_x))
  })
})
# Bind point estimates for each treatment group into one column per arm,
# with row names such as "age.mean" / "age.sd".
bal_mat <- lapply(bal_mat, function (treat) {
  do.call("rbind", unlist(treat, recursive = FALSE))
})
# Convert list into a matrix, with columns representing treatment group.
bal_mat <- do.call("cbind", bal_mat)
# Round all estimates to within three decimal points and convert to
# character for the purposes of producing tabular output.
bal_tab <- apply(bal_mat, 2, function (x) format(round(x, 3), digits = 3))
# Specify rows containing mean point estimates.
mean_rows <- endsWith(rownames(bal_tab), ".mean")
# Specify rows containing SE point estimates.
se_rows <- endsWith(rownames(bal_tab), ".sd")
# Reformat SE estimates to be within parentheses (column-wise apply keeps
# the sub-matrix orientation intact).
bal_tab[se_rows,] <- apply(bal_tab[se_rows,], 2, function (x) {
  paste0("(", x, ")")
})
# Remove row names for rows with SE estimates.
rownames(bal_tab)[se_rows] <- ""
# Remove ".mean" string in row names for rows with mean estimates
# (fixed = TRUE so the dot is matched literally, not as a regex wildcard).
rownames(bal_tab)[mean_rows] <- gsub(".mean", "",
                                     rownames(bal_tab)[mean_rows],
                                     fixed = TRUE)
# Concatenate data to comport with LaTeX tabular markup: label column, then
# the five arm columns, rows terminated by "\\".
bal_tab <- paste(paste(paste(
  capwords(rownames(bal_tab)),
  apply(bal_tab, 1, function (x) { paste(x, collapse = " & ") }),
  sep = " & "), collapse = " \\\\\n"), "\\\\\n")
# Strip padding spaces left by format() inside opening parentheses.
bal_tab <- gsub("\\( ", "\\(", bal_tab)
# Produce tabular output.
# Write the balance table, wrapped in LaTeX table/tabular markup (siunitx
# "S" columns; "()" declared as input symbols so parenthesized SEs align).
sink("output/balance_tab.tex")
cat("\\begin{table}\n",
    "\\caption{Covariate Balance Across Treatment Arms}\n",
    "\\centering\\small\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{2.75cm}*{5}",
           "{S[table-number-alignment = center, table-column-width = 1.75cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
    "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
    bal_tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{table}\n")
sink()

## Produce main results.
# Compute main results, looping over conditioning sets (Model 1: treatments
# only; Model 2: treatments plus demographic/political covariates).
main_results <- lapply(covars, function (covar) {
  # Specify the relevant regression formula, e.g.
  # join_tpnw ~ group_cue + security_cue + norms_cue + institutions_cue [+ covars].
  form <- as.formula(paste(join_tpnw,
                           paste(c(treats, covar), collapse = " + "),
                           sep = " ~ "))
  # Fit the OLS model per the specification.
  fit <- lm(form, data = tpnw)
  # Compute HC2 robust standard errors.
  ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
  # Bind coefficient and SE output; positions 2:5 are the four treatment
  # terms (they always come first in the formula, after the intercept).
  reg_out <- cbind(fit$coef[2:5], ses[2:5])
  # Name output matrix columns and rows.
  colnames(reg_out) <- c("coef", "se")
  rownames(reg_out) <- treats
  # Return output as a data frame (columns: coef, se).
  return(as.data.frame(reg_out))
})
# Name results to distinguish between Model 1 and Model 2 estimates.
names(main_results) <- c("model_1", "model_2")

## Assess significance of effect estimates and differences.
# Estimate Bonferroni-Holm-adjusted p-values.
# NOTE(review): pnorm(coef / se, lower.tail = TRUE) is a ONE-sided p-value
# testing for negative effects — presumably intentional given the
# directional hypotheses; confirm against the paper's pre-registration.
bf_ps <- lapply(main_results, function (x) {
  round(p.adjust(pnorm(x[, 1] / x[, 2], lower.tail = TRUE),
                 method = "holm"), 3)
})
# Estimate FDR-adjusted p-values, as an added robustness check (same
# one-sided construction as above).
fdr_ps <- lapply(main_results, function (x) {
  round(p.adjust(pnorm(x[, 1] / x[, 2], lower.tail = TRUE),
                 method = "fdr"), 3)
})
# Redefine the main model (Model 2), and store full VCOV matrix.
# Refit the main model (Model 2) directly and store its full HC2 VCOV
# matrix, so coefficient covariances are available for the pairwise
# difference tests below.
main_model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
                   institutions_cue + age + female + midwest + west + south +
                   income + educ + ideo + pid3, tpnw)
main_vcov <- vcovHC(main_model, "HC2")

# Specify diff_sig function for assessing significance between two effect
# estimates (defined here for the sake of clarity).
#
# Args:
#   eff_1, eff_2: coefficient names in `main_model` (e.g. "group_cue").
# Returns:
#   Two-tailed normal-approximation p-value for H0: beta_1 = beta_2, with
#   the SE of the difference computed from the HC2 VCOV, including the
#   covariance term (Var(b1 - b2) = V11 + V22 - 2 * V12).
diff_sig <- function (eff_1, eff_2) {
  diff <- main_model$coef[eff_1] - main_model$coef[eff_2]
  se <- sqrt(main_vcov[eff_1, eff_1] + main_vcov[eff_2, eff_2] -
               2 * main_vcov[eff_1, eff_2])
  p <- 2 * (1 - pnorm(abs(diff) / se))
  return (p)
}

# Assess the significance of the difference between institutions and
# security cue effect estimates.
inst_sec_diff_p <- diff_sig("institutions_cue", "security_cue")
# Assess the significance of the difference between institutions and group
# cue effect estimates.
inst_grp_diff_p <- diff_sig("institutions_cue", "group_cue")
# Assess the significance of the difference between security and group cue
# effect estimates.
sec_grp_diff_p <- diff_sig("security_cue", "group_cue")
# Assess the significance of the difference between security and norms cue
# effect estimates.
sec_norms_diff_p <- diff_sig("security_cue", "norms_cue")
# Assess the significance of the difference between institutions and norms
# cue effect estimates. (Original comment said "institution and group" —
# copy-paste slip; the call below is the norms comparison.)
inst_norms_diff_p <- diff_sig("institutions_cue", "norms_cue")
# Assess the significance of the difference between group and norms cue
# effect estimates. (Original comment also said "institution and group".)
grp_norms_diff_p <- diff_sig("group_cue", "norms_cue")

# The significance of differences between effect estimates was also assessed
# using 10,000 bootstrap replicates and two-tailed p-values; relevant code
# is included below with the institutions and security cues, for posterity,
# but is not run.
# Compute SE estimates.
# diffs <- replicate(10000, {
#   samp <- tpnw[sample(nrow(tpnw), replace = TRUE),]
#   model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
#                 institutions_cue + age + female + midwest +
#                 west + south + income + educ + ideo + pid3, samp)
#   model$coef[5] - model$coef[3]
# })
# diffs_se <- sd(diffs)
#
# # Fit model.
# model <- lm(join_tpnw ~ group_cue + security_cue + norms_cue +
#               institutions_cue + age + female + midwest +
#               west + south + income + educ + ideo + pid3, tpnw)
#
# # Compute two-tailed p-value.
# 2 * (1 - pnorm(abs((model$coef[5] - model$coef[3])/diffs_se)))

## Assess YouGov results.
# Tabulate responses; useNA = "ifany" appends a final cell counting skipped
# (NA) responses.
aware_table <- table(aware$awareness, useNA = "ifany")
# NOTE(review): assumes `awareness` takes exactly four response codes plus
# NA, in this order — verify against the survey codebook.
names(aware_table) <- c("Yes, support", "Yes, oppose", "No, support",
                        "No, oppose", "Skipped")
# Compute both weighted and unweighted means, looping over response codes.
aware_results <- lapply(1:4, function (resp) {
  # Calculate weighted mean (survey-weighted share giving response `resp`).
  wt_mean <- with(aware, weighted.mean(awareness == resp, w = weight,
                                       na.rm = TRUE))
  # Calculate raw (unweighted) mean.
  rw_mean <- with(aware, mean(awareness == resp, na.rm = TRUE))
  # Concatenate means and rename vector.
  means <- c(wt_mean, rw_mean)
  names(means) <- c("weighted_mean", "raw_mean")
  # Calculate SE estimates with n_iter bootstrap replicates (resampling
  # whole rows so weights travel with responses).
  ses <- replicate(n_iter, {
    samp <- aware[sample(nrow(aware), replace = TRUE),]
    wt_mean <- with(samp, weighted.mean(awareness == resp, w = weight,
                                        na.rm = TRUE))
    rw_mean <- with(samp, mean(awareness == resp, na.rm = TRUE))
    return(c(wt_mean, rw_mean))
  })
  ses <- apply(ses, 1, sd)
  names(ses) <- c("weighted_mean", "raw_mean")
  # Bind mean and SE estimates; rows are "<label>_mean" / "<label>_se".
  outs <- rbind(means, ses)
  rownames(outs) <- paste(names(aware_table)[resp], c("mean", "se"),
                          sep = "_")
  return(outs)
})
# Name results to distinguish between responses.
names(aware_results) <- c("Yes, support", "Yes, oppose", "No, support",
                          "No, oppose")

## Assess covariate means for experimental and YouGov data (used in Table A1).
# Indicate the list of covariates to be assessed.
demo_tab_vars <- c("age", "female", "northeast", "midwest", "west", "south")
# Compute covariate averages for experimental data.
tpnw_means <- apply(tpnw[demo_tab_vars], 2, mean, na.rm = TRUE)
# Compute covariate averages for YouGov data (survey-weighted).
aware_means <- apply(aware[demo_tab_vars], 2, function (x) {
  weighted.mean(x, na.rm = TRUE, w = aware$weight)
})
# Compute bootstrap standard errors for demographic means (n_iter
# replicates; simplify = FALSE keeps a list of per-iteration results).
demo_ses <- replicate(n_iter, {
  # Sample the experimental data.
  samp_tpnw <- tpnw[sample(nrow(tpnw), replace = TRUE), demo_tab_vars]
  # Sample the YouGov data (keep "weight" so weighted means are computable).
  samp_aware <- aware[sample(nrow(aware), replace = TRUE),
                      c(demo_tab_vars, "weight")]
  # Compute bootstrap means for experimental data (the re-subset by
  # demo_tab_vars is a harmless no-op, kept for symmetry).
  tpnw_means <- apply(samp_tpnw[demo_tab_vars], 2, mean, na.rm = TRUE)
  # Compute bootstrap means for YouGov data.
  aware_means <- apply(samp_aware[demo_tab_vars], 2, function (x) {
    weighted.mean(x, na.rm = TRUE, w = samp_aware$weight)
  })
  return(list(tpnw = tpnw_means, aware = aware_means))
}, simplify = FALSE)
# Collapse the replicate list into SE estimates for each dataset
# (overwrites demo_ses with a 2-element list of per-covariate SEs).
demo_ses <- lapply(c("tpnw", "aware"), function (dataset) {
  # Group all estimates from each dataset.
  sep_res <- lapply(demo_ses, function (iteration) {
    return(iteration[[dataset]])
  })
  # Bind estimates.
  sep_res <- do.call("rbind", sep_res)
  # Compute SE estimates.
  sep_ses <- apply(sep_res, 2, sd)
  # Return SE estimates.
  return(sep_ses)
})

## Assess responses to the attitudinal battery.
# Assess responses to the attitudinal battery, looping over treatment group.
att_results <- lapply(0:4, function (i) {
  # Calculate the average response to each attitudinal battery question.
  atts_mean <- apply(tpnw[tpnw$treatment == i, tpnw_atts], 2, function (x) {
    mean(x, na.rm = TRUE)
  })
  # Calculate SE estimates using n_iter bootstrap replicates.
  bl_atts_boot <- replicate(n_iter, {
    dat <- tpnw[tpnw$treatment == i, tpnw_atts]
    samp <- dat[sample(nrow(dat), replace = TRUE),]
    apply(samp, 2, function (x) mean(x, na.rm = TRUE))
  })
  bl_atts_ses <- apply(bl_atts_boot, 1, sd)
  # Combine mean and SE estimates and return results.
  return(cbind(atts_mean, bl_atts_ses))
})
# Compute treatment effects on responses to the attitudinal battery,
# looping over conditioning sets (same specification as main_results).
att_effs <- lapply(covars, function (covar) {
  model_res <- lapply(tpnw_atts, function (out) {
    # Specify the relevant regression formula.
    form <- as.formula(paste(out,
                             paste(c(treats, covar), collapse = " + "),
                             sep = " ~ "))
    # Fit the OLS model per the specification.
    fit <- lm(form, data = tpnw)
    # Compute HC2 robust standard errors.
    ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
    # Bind coefficient and SE output (positions 2:5 are the treatments).
    reg_out <- cbind(fit$coef[2:5], ses[2:5])
    # Name output matrix columns and rows.
    colnames(reg_out) <- c("coef", "se")
    rownames(reg_out) <- treats
    # Return output.
    return(as.data.frame(reg_out))
  })
  # Name results to distinguish between each attitudinal battery outcome.
  names(model_res) <- tpnw_atts
  return(model_res)
})
# Name results to distinguish between Model 1 and Model 2 estimates.
names(att_effs) <- c("model_1", "model_2")

## Perform subgroup analysis.
# Compute mean support by political party, looping over treatment group
# (pid3: -1 = Democrat, 0 = Independent, 1 = Republican).
pid_results <- lapply(0:4, function (treat) {
  out <- lapply(-1:1, function (i) {
    # Calculate average support.
    pid_mean <- with(tpnw, mean(join_tpnw[pid3 == i & treatment == treat],
                                na.rm = TRUE))
    # Calculate SE estimates with n_iter bootstrap replicates.
    pid_boot <- replicate(n_iter, {
      dat <- tpnw$join_tpnw[tpnw$pid3 == i & tpnw$treatment == treat]
      samp <- dat[sample(length(dat), replace = TRUE)]
      mean(samp, na.rm = TRUE)
    })
    # Concatenate and return mean and SE estimates.
    return(c(mean = pid_mean, se = sd(pid_boot)))
  })
  # Name results to distinguish estimates by political party.
  names(out) <- c("dem", "ind", "rep")
  return(as.data.frame(out))
})
# Name results to distinguish between treatment groups.
names(pid_results) <- c("Control",
                        paste(c("Group", "Security", "Norms",
                                "Institutions"), "Cue"))
# Assess significance between control-group means; for n_iter bootstrap
# replicates ...
pid_diff_ses <- replicate(n_iter, {
  # Sample with replacement.
  samp <- tpnw[sample(nrow(tpnw), replace = TRUE),]
  # Compute the difference between Democrats' and Independents' support.
  dem_ind_diff <- with(samp[samp$treatment == 0,],
                       mean(join_tpnw[pid3 == -1], na.rm = TRUE) -
                         mean(join_tpnw[pid3 == 0], na.rm = TRUE))
  # Compute the difference between Democrats' and Republicans' support.
  dem_rep_diff <- with(samp[samp$treatment == 0,],
                       mean(join_tpnw[pid3 == -1], na.rm = TRUE) -
                         mean(join_tpnw[pid3 == 1], na.rm = TRUE))
  # Compute the difference between Independents' and Republicans' support.
  # (Fixed: this previously computed Republicans minus Independents,
  # contradicting the "ind_rep" label; since only the bootstrap SD is used
  # below and SD is sign-invariant, downstream p-values are unchanged.)
  ind_rep_diff <- with(samp[samp$treatment == 0,],
                       mean(join_tpnw[pid3 == 0], na.rm = TRUE) -
                         mean(join_tpnw[pid3 == 1], na.rm = TRUE))
  # Concatenate and name results.
  out <- c(dem_ind_diff, dem_rep_diff, ind_rep_diff)
  names(out) <- c("dem_ind", "dem_rep", "ind_rep")
  return(out)
})
# Compute SE estimates for each difference.
pid_diff_ses <- apply(pid_diff_ses, 1, sd)
# Assess significance for each difference (two-tailed normal approximation,
# using the observed control-group means and bootstrap SEs).
dem_ind_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "dem"] -
                                  pid_results$Control["mean", "ind"]) /
                              pid_diff_ses["dem_ind"]))
dem_rep_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "dem"] -
                                  pid_results$Control["mean", "rep"]) /
                              pid_diff_ses["dem_rep"]))
ind_rep_p <- 2 * (1 - pnorm(abs(pid_results$Control["mean", "ind"] -
                                  pid_results$Control["mean", "rep"]) /
                              pid_diff_ses["ind_rep"]))
# Compute mean support by political ideology, looping over treatment group.
## Produce weighted main results.
# Compute weighted (anesrake) main results, looping over conditioning sets.
# NOTE: this is intentionally run BEFORE `ideo` is recoded below, so that
# weighted Model 2 adjusts for the same NUMERIC ideology measure as the
# unweighted main results. (Previously the recode ran first, so the
# weighted Model 2 silently conditioned on a categorical ideology variable,
# diverging from the unweighted specification.)
w_main_results <- lapply(covars, function (covar) {
  # Specify the relevant regression formula.
  form <- as.formula(paste(join_tpnw,
                           paste(c(treats, covar), collapse = " + "),
                           sep = " ~ "))
  # Fit the weighted OLS model (anesrake_weight is a column of tpnw,
  # resolved inside `data` by lm()).
  fit <- lm(form, data = tpnw, weights = anesrake_weight)
  # Compute HC2 robust standard errors.
  ses <- sqrt(diag(vcovHC(fit, type = "HC2")))
  # Bind coefficient and SE output (positions 2:5 are the treatments).
  reg_out <- cbind(fit$coef[2:5], ses[2:5])
  # Name output matrix columns and rows.
  colnames(reg_out) <- c("coef", "se")
  rownames(reg_out) <- treats
  # Return output.
  return(as.data.frame(reg_out))
})
# Name results to distinguish between Model 1 and Model 2 estimates.
names(w_main_results) <- c("model_1", "model_2")

# Compute mean support by political ideology, looping over treatment group.
# Collapse the 5-point ideology scale into three groups.
tpnw$ideo <- recode(tpnw$ideo,
                    "c(-2, -1) = 'liberal'; 0 = 'moderate'; c(1, 2) = 'conservative'")
ideo_results <- lapply(0:4, function (treat) {
  # For each ideological group ...
  out <- lapply(c("liberal", "moderate", "conservative"), function (i) {
    # Calculate average support.
    ideo_mean <- with(tpnw, mean(join_tpnw[ideo == i & treatment == treat],
                                 na.rm = TRUE))
    # Calculate SE estimates with n_iter bootstrap replicates.
    ideo_boot <- replicate(n_iter, {
      dat <- tpnw$join_tpnw[tpnw$ideo == i & tpnw$treatment == treat]
      samp <- dat[sample(length(dat), replace = TRUE)]
      mean(samp, na.rm = TRUE)
    })
    # Concatenate and return mean and SE estimates.
    return(c(mean = ideo_mean, se = sd(ideo_boot)))
  })
  # Name results to distinguish estimates by political ideology.
  names(out) <- c("liberal", "moderate", "conservative")
  return(as.data.frame(out))
})
# Name results to distinguish between treatment groups.
names(ideo_results) <- c("Control",
                         paste(c("Group", "Security", "Norms",
                                 "Institutions"), "Cue"))

### Produce plots and tables.

## Produce main results plot.
# Produce main results matrix for plotting.
# Stack Model 1 and Model 2 estimates, tagging each row with its model.
main_mat <- do.call("rbind", lapply(1:2, function (model) {
  cbind(main_results[[model]], model)
}))
# Store critical values for constructing 90- and 95-percent CIs.
z_90 <- qnorm(.95)
z_95 <- qnorm(.975)
# Open a new EPS device (the original comment said "pdf"; output is EPS).
setEPS()
postscript("output/fg1.eps", width = 8, height = 5.5)
# Define custom graphical parameters (wide bottom/left margins for labels).
par(mar = c(8, 7, 2, 2))
# Open new, empty plot.
plot(0, type = "n", axes = FALSE, ann = FALSE, xlim = c(-.3, .05),
     ylim = c(.8, 4))
# Produce guidelines to go behind point estimates and error bars; [-7]
# drops the gridline at zero, which is drawn separately as a dashed line.
abline(v = seq(-.3, .05, .05)[-7], col = "lightgrey", lty = 3)
# Add Model 1 point estimates (offset upward by .05 to avoid overplotting).
par(new = TRUE)
plot(x = main_mat$coef[main_mat$model == 1], y = 1:4 + .05,
     xlim = c(-.3, .05), ylim = c(.8, 4), pch = 16, col = "steelblue2",
     xlab = "", ylab = "", axes = FALSE)
# Add Model 2 point estimates (offset downward by .05).
par(new = TRUE)
plot(x = main_mat$coef[main_mat$model == 2], y = 1:4 - .05,
     xlim = c(-.3, .05), ylim = c(.8, 4), pch = 16, col = "#FF8F37",
     main = "", xlab = "", ylab = "", axes = FALSE)
# Add horizontal axis indicating effect estimate size; labels are in
# percentage points (coefficient x 100).
axis(side = 1, at = round(seq(-.3, 0, .05), 2), labels = FALSE)
# Fixed: positions previously ran seq(-.3, .1, .1) — five positions for
# four labels — so argument recycling drew a stray "-30" at x = .1, outside
# the plot region. Four positions now match the four labels.
mtext(side = 1, at = seq(-.3, 0, .1), text = c("-30", "-20", "-10", "0"),
      cex = .9, line = .75)
# Minor ticks.
axis(side = 1, at = round(seq(-.25, .05, .05), 2), tck = -.01,
     labels = FALSE)
# Add vertical axis specifying treatment names corresponding to estimates.
axis(side = 2, at = 1:4, labels = FALSE)
mtext(side = 2, line = .75, at = 1:4,
      text = paste(c("Group", "Security", "Norms", "Institutions"), "Cue"),
      las = 1, padj = .35, cex = .9)
# Add axis labels.
mtext(side = 2, line = 2.3, at = 4.2, text = "Treatment", font = 2,
      las = 1, xpd = TRUE)
mtext(side = 1, text = "Estimated Effect Size", line = 2.5, at = -.15,
      font = 2)
# Add a dashed line at zero.
abline(v = 0.00, lty = 2)
# Add two-sided, 90-percent CIs.
# Add two-sided, 90-percent CIs (thick segments), offset to match points.
with(main_mat[main_mat$model == 1,],
     segments(x0 = coef - z_90 * se, y0 = 1:4 + .05,
              x1 = coef + z_90 * se, y1 = 1:4 + .05,
              col = "steelblue2", lwd = 3))
with(main_mat[main_mat$model == 2,],
     segments(x0 = coef - z_90 * se, y0 = 1:4 - .05,
              x1 = coef + z_90 * se, y1 = 1:4 - .05,
              col = "#FF8F37", lwd = 3))
# Add two-sided 95-percent CIs (thin segments).
with(main_mat[main_mat$model == 1,],
     segments(x0 = coef - z_95 * se, y0 = 1:4 + .05,
              x1 = coef + z_95 * se, y1 = 1:4 + .05,
              col = "steelblue2", lwd = 1))
with(main_mat[main_mat$model == 2,],
     segments(x0 = coef - z_95 * se, y0 = 1:4 - .05,
              x1 = coef + z_95 * se, y1 = 1:4 - .05,
              col = "#FF8F37", lwd = 1))
# Add legend below the plot region (xpd = TRUE allows drawing there).
legend(legend = paste("Model", 1:2), x = -.15, y = -.275, horiz = TRUE,
       pch = 16, col = c("steelblue2", "#FF8F37"), xjust = .5, xpd = TRUE,
       text.width = .05, cex = .9)
# Draw a box around the plot.
box()
# Close the graphical device.
dev.off()

## Create tabular output for main results.
# Define matrix object of main results (columns: model_1 coef/se,
# model_2 coef/se).
tab_dat <- do.call("cbind", main_results)
# Compute control-group means, with SE estimates; define OLS formula.
ctrl_form <- as.formula(paste(join_tpnw, paste(treats, collapse = " + "),
                              sep = " ~ "))
# Fit the OLS model per the specification.
ctrl_fit <- lm(ctrl_form, data = tpnw)
# Recover the control-group mean (the intercept of a treatments-only model).
ctrl_mean <- ctrl_fit$coef["(Intercept)"]
# Compute control SE (HC2 robust).
ctrl_se <- sqrt(diag(vcovHC(ctrl_fit, "HC2")))["(Intercept)"]
# Concatenate mean and SE output (scaled to percentage points) with "|"
# placeholders for the Model 2 columns.
ctrl_results <- c(format(round(c(ctrl_mean, ctrl_se), 3) * 100, digits = 2),
                  "|", "|")
# Reformat data to include a decimal point (scaled to percentage points).
tab_dat <- apply(tab_dat, 2, function (y) format(round(y, 3) * 100,
                                                 digits = 2))
# Bind control-group means with main results data.
tab <- rbind(ctrl_results, tab_dat)
# Rename row containing control-group means. Fixed: rbind() labels the
# bound row with the deparsed symbol "ctrl_results", not "1", so the
# original lookup matched nothing and the published row was labelled
# "Ctrl Results" instead of "Control Mean".
rownames(tab)[which(rownames(tab) == "ctrl_results")] <- "control_mean"
# Relabel coefficient columns.
# Identify coefficient columns. (NOTE(review): `coef_cols` is computed but
# never used below — dead assignment, harmless.)
coef_cols <- grep("coef$", colnames(tab))
# Identify SE columns.
se_cols <- grep("se$", colnames(tab))
# Reformat SE estimates to be within parentheses (column-wise apply keeps
# the sub-matrix orientation intact).
tab[,se_cols] <- apply(tab[, se_cols], 2, function (y) paste0("(", y, ")"))
# Concatenate data to comport with LaTeX tabular markup: label column, then
# data columns, rows terminated by "\\".
tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(tab))),
                         apply(tab, 1, function (x) {
                           paste(x, collapse = " & ")
                         }), sep = " & "), collapse = " \\\\\n"), "\\\\\n")
# Produce tabular output.
sink("output/main_results_tab.tex")
cat("\\begin{table}\n",
    "\\caption{Estimated Treatment Effects on Support for TPNW}\n",
    "\\begin{adjustbox}{width = \\textwidth, center}\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\ttable-space-text-post = *,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{4}",
           "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{4}{c}{Model}\\\\\\cmidrule{2-5}\n",
    "& \\multicolumn{2}{c}{{(1)}} & \\multicolumn{2}{c}{{(2)}} \\\\\\midrule\n",
    tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{adjustbox}\n",
    "\\end{table}\n")
sink()

## Create tabular output for YouGov results.
# Restructure data as a matrix (the outer rbind() is redundant but
# harmless; do.call already returns the stacked matrix).
aware_tab <- rbind(do.call("rbind", aware_results))
# Reformat data as percentages with three decimal points.
aware_tab <- apply(aware_tab, 2, function (y) format(round(y, 3) * 100,
                                                     digits = 3))
# Identify mean rows (row names end in "_mean").
mean_rows <- endsWith(rownames(aware_tab), "mean")
# Identify SE rows (row names end in "_se").
se_rows <- endsWith(rownames(aware_tab), "se")
# Reformat SE estimates to be within parentheses (elementwise paste0
# preserves matrix positions).
aware_tab[se_rows,] <- paste0("(", aware_tab[se_rows,], ")")
# Remove row names for rows with SE estimates.
rownames(aware_tab)[se_rows] <- ""
# Remove "_mean" indication in mean_rows.
# Remove the "_mean" suffix from the mean-estimate row names.
rownames(aware_tab)[mean_rows] <- gsub("_mean", "",
                                       rownames(aware_tab)[mean_rows])
# Add an empty row, where excluded calculations of responses among skips
# are noted in the table, and rename the relevant row ("|" is the
# placeholder used throughout for intentionally blank cells).
aware_tab <- rbind(aware_tab, c("|", "|"))
rownames(aware_tab)[nrow(aware_tab)] <- "Skipped"
# (Fixed: a stray copy of the indexing expression below was evaluated and
# discarded here; the no-op has been removed.)
# Add an empty column to the table, and insert the response counts at the
# rows whose names match the tabulated response labels.
aware_tab <- cbind(aware_tab, "")
colnames(aware_tab)[ncol(aware_tab)] <- "N"
aware_tab[which(rownames(aware_tab) %in% names(aware_table)), "N"] <-
  aware_table
# Concatenate data to comport with LaTeX tabular markup.
aware_tab <- paste(paste(paste(capwords(gsub("_", " ",
                                             rownames(aware_tab))),
                               apply(aware_tab, 1, function (x) {
                                 paste(x, collapse = " & ")
                               }), sep = " & "), collapse = " \\\\\n"),
                   "\\\\\n")
# Produce tabular output.
# NOTE(review): the header below lists the five experimental arms, but
# `aware_tab` has three data columns (weighted mean, raw mean, N) — it
# looks copy-pasted from another table; confirm against the published
# appendix before relying on this output.
sink("output/yougov_tab.tex")
cat("\\begin{table}\n",
    "\\caption{YouGov Survey Responses}\n",
    "\\centering\\small\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
           "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
    "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
    aware_tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{table}\n")
sink()

## Create tabular output for attitudinal results.
# Define matrix object of attitudinal results (5 arms x {mean, se} column
# pairs; row names are the attitudinal outcome names).
tab_dat <- do.call("cbind", att_results)
# Reformat matrix to alternate mean and SE estimates: for each arm's pair
# of columns, interleave mean and SE row-wise into one column per arm.
# (Generalized: the row count was hard-coded as 14 — i.e. 2 x the number of
# attitudinal outcomes — and the offsets as seq(0, 8, 2); both now follow
# the dimensions of tab_dat. Values are unchanged for the shipped data.)
tab <- sapply(seq(0, ncol(tab_dat) - 2, 2), function (i) {
  matrix(c(t(tab_dat[, 1:2 + i])), 2 * nrow(tab_dat), 1)
})
# Reformat data to include three decimal points.
tab <- apply(tab, 2, function (y) format(round(y, 3), digits = 3))
# Rename rows to indicate mean and SE estimates.
# Assign alternating "<outcome>_mean" / "<outcome>_se" row names.
rownames(tab) <- paste(rep(rownames(tab_dat), each = 2), c("mean", "se"),
                       sep = "_")
# Identify mean rows (anchored so an outcome name containing "_mean" or
# "_se" internally cannot be matched by accident).
mean_rows <- grep("_mean$", rownames(tab))
# Identify SE rows.
se_rows <- grep("_se$", rownames(tab))
# Reformat SE estimates to be within parentheses, stripping the padding
# spaces left by format(). Fixed: this was previously done with
# apply(tab[se_rows,], 1, ...), whose result is the TRANSPOSE of the
# sub-matrix; assigning that back column-major scrambled the SEs across
# arm columns. The elementwise form below (used for the other tables in
# this script) preserves every value's position.
tab[se_rows,] <- paste0("(", gsub(" ", "", tab[se_rows,]), ")")
# Rename rows to improve tabular labels; remove "tpnw_atts", "mean," and
# "se" row name strings.
rownames(tab) <- gsub("tpnw_atts|mean$|se$", "", rownames(tab))
# Remove leading and trailing underscores.
rownames(tab) <- gsub("^_|_$", "", rownames(tab))
# Remove row names for rows with SE estimates.
rownames(tab)[se_rows] <- ""
# Concatenate data to comport with LaTeX tabular markup.
tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(tab))),
                         apply(tab, 1, function (x) {
                           paste(x, collapse = " & ")
                         }), sep = " & "), collapse = " \\\\\n"), "\\\\\n")
# Produce tabular output.
sink("output/atts_tab.tex")
cat("\\begin{table}\n",
    "\\caption{Attitudes Toward Nuclear Weapons by Arm}\n",
    "\\centering\\small\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\ttable-space-text-post = *,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
           "{S[table-number-alignment = center, table-column-width=1.25cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
    "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
    tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{table}\n")
sink()

## Create tabular output for results by political party.
# Restructure data such that mean and SE estimates are alternating rows in
# a 6 x 1 matrix, in each of five list elements, corresponding to each
# treatment group; and bind the results for each treatment group.
# Unlist each 2 x 3 (mean/se x dem/ind/rep) result into a 6 x 1 column
# ordered dem.mean, dem.se, ind.mean, ind.se, rep.mean, rep.se.
pid_tab <- lapply(pid_results, function (x) {
  matrix(unlist(x), nrow = 6, ncol = 1)
})
pid_tab <- do.call("cbind", pid_tab)
# Assign row names to distinguish results for each partisan group, and mean
# and SE estimates (default sep is a space, e.g. "democrat mean").
rownames(pid_tab) <- paste(rep(c("democrat", "independent", "republican"),
                               each = 2), c("mean", "se"))
# Identify mean rows.
mean_rows <- endsWith(rownames(pid_tab), "mean")
# Identify SE rows.
se_rows <- endsWith(rownames(pid_tab), "se")
# Label columns per treatment, for the computation of ATEs.
colnames(pid_tab) <- c("control", treats)
# Compute ATEs, with control as baseline, and update tabular data. The
# length-3 control column recycles down each treatment column, so each
# group's own control mean is subtracted.
pid_tab[mean_rows, treats] <- pid_tab[mean_rows, treats] -
  pid_tab[mean_rows, "control"]
# Reformat data as percentages with three decimal points.
pid_tab <- apply(pid_tab, 2, function (y) format(round(y, 3) * 100,
                                                 digits = 3))
# Remove extraneous spacing (gsub preserves the matrix's dim attributes).
pid_tab <- gsub(" ", "", pid_tab)
# Reformat SE estimates to be within parentheses.
pid_tab[se_rows,] <- paste0("(", pid_tab[se_rows,], ")")
# Remove row names for rows with SE estimates.
rownames(pid_tab)[se_rows] <- ""
# Concatenate data to comport with LaTeX tabular markup (the gsub("_", …)
# is a no-op here, since these row names use spaces; kept for symmetry
# with the other tables).
pid_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(pid_tab))),
                             apply(pid_tab, 1, function (x) {
                               paste(x, collapse = " & ")
                             }), sep = " & "), collapse = " \\\\\n"),
                 "\\\\\n")
# Produce tabular output.
# Write the party-ID support table to disk.
sink("output/pid_support.tex")
cat("\\begin{table}\n",
    "\\caption{Support for Joining TPNW by Party ID}\n",
    "\\centering\\small\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
           "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
    "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
    pid_tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{table}\n")
sink()

## Create tabular output for results by political ideology.
# Restructure data such that mean and SE estimates are alternating rows in
# a 6 x 1 matrix, in each of five list elements, corresponding to each
# treatment group; and bind the results for each treatment group.
ideo_tab <- lapply(ideo_results, function (x) {
  matrix(unlist(x), nrow = 6, ncol = 1)
})
ideo_tab <- do.call("cbind", ideo_tab)
# Assign row names to distinguish results for each ideological group, and
# mean and SE estimates.
rownames(ideo_tab) <- paste(rep(c("liberal", "moderate", "conservative"),
                                each = 2), c("mean", "se"))
# Reformat data as percentages with three decimal points.
# NOTE(review): unlike the party-ID table above, no control-group baseline
# is subtracted here — this table reports raw means per arm, not ATEs;
# presumably intentional, but confirm against the paper.
ideo_tab <- apply(ideo_tab, 2, function (y) format(round(y, 3) * 100,
                                                   digits = 3))
# Identify mean rows.
mean_rows <- endsWith(rownames(ideo_tab), "mean")
# Identify SE rows.
se_rows <- endsWith(rownames(ideo_tab), "se")
# Reformat SE estimates to be within parentheses.
ideo_tab[se_rows,] <- paste0("(", ideo_tab[se_rows,], ")")
# Remove row names for rows with SE estimates.
rownames(ideo_tab)[se_rows] <- ""
# Concatenate data to comport with LaTeX tabular markup.
ideo_tab <- paste(paste(paste(capwords(gsub("_", " ",
                                            rownames(ideo_tab))),
                              apply(ideo_tab, 1, function (x) {
                                paste(x, collapse = " & ")
                              }), sep = " & "), collapse = " \\\\\n"),
                  "\\\\\n")
# Produce tabular output.
# Write the ideology support table to disk.
sink("output/ideo_support_tab.tex")
cat("\\begin{table}\n",
    "\\caption{Support for Joining TPNW by Ideology}\n",
    "\\centering\\small\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{5}",
           "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{5}{c}{Arm}\\\\\\cmidrule{2-6}\n",
    "& {Control} & {Group} & {Security} & {Norms} & {Institutions} \\\\\\midrule\n",
    ideo_tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{table}\n")
sink()

## Create tabular output for weighted main results.
# Define matrix object of weighted main results.
w_tab_dat <- do.call("cbind", w_main_results)
# Compute weighted control-group means, with SE estimates; define OLS
# formula.
w_ctrl_form <- as.formula(paste(join_tpnw, paste(treats, collapse = " + "),
                                sep = " ~ "))
# Fit the weighted OLS model per the specification.
w_ctrl_fit <- lm(w_ctrl_form, data = tpnw, weights = anesrake_weight)
# Recover the (weighted) control-group mean.
w_ctrl_mean <- w_ctrl_fit$coef["(Intercept)"]
# Compute control SE (HC2 robust).
w_ctrl_se <- sqrt(diag(vcovHC(w_ctrl_fit, "HC2")))["(Intercept)"]
# Concatenate mean and SE output (scaled to percentage points) with "|"
# placeholders for the Model 2 columns.
w_ctrl_results <- c(format(round(c(w_ctrl_mean, w_ctrl_se), 3) * 100,
                           digits = 2), "|", "|")
# Reformat data to include a decimal point.
w_tab_dat <- apply(w_tab_dat, 2, function (y) format(round(y, 3) * 100,
                                                     digits = 2))
# Bind control-group means with main results data.
w_tab <- rbind(w_ctrl_results, w_tab_dat)
# Rename row containing control-group means. Fixed: rbind() labels the
# bound row with the deparsed symbol "w_ctrl_results", not "1", so the
# original lookup matched nothing and the row kept its raw label.
rownames(w_tab)[which(rownames(w_tab) == "w_ctrl_results")] <-
  "control_mean"
# Identify coefficient columns. (Computed but unused below; kept to mirror
# the unweighted table code.)
coef_cols <- grep("coef$", colnames(w_tab))
# Identify SE columns.
se_cols <- grep("se$", colnames(w_tab))
# Reformat SE estimates to be within parentheses.
# Wrap SE estimates in parentheses (column-wise apply keeps the sub-matrix
# orientation intact).
w_tab[,se_cols] <- apply(w_tab[, se_cols], 2,
                         function (y) paste0("(", y, ")"))
# Concatenate data to comport with LaTeX tabular markup.
w_tab <- paste(paste(paste(capwords(gsub("_", " ", rownames(w_tab))),
                           apply(w_tab, 1, function (x) {
                             paste(x, collapse = " & ")
                           }), sep = " & "), collapse = " \\\\\n"),
               "\\\\\n")
# Produce tabular output.
sink("output/weighted_main_results_tab.tex")
cat("\\begin{table}\n",
    "\\caption{Estimated Treatment Effects on Support for TPNW (Weighted)}\n",
    "\\begin{adjustbox}{width = \\textwidth, center}\n",
    "\\sisetup{\n",
    "\tdetect-all,\n",
    "\ttable-number-alignment = center,\n",
    "\ttable-figures-integer = 1,\n",
    "\ttable-figures-decimal = 3,\n",
    "\ttable-space-text-post = *,\n",
    "\tinput-symbols = {()}\n",
    "}\n",
    paste0("\\begin{tabular}{@{\\extracolsep{5pt}}L{3.5cm}*{4}",
           "{S[table-number-alignment = right, table-column-width=1.25cm]}}\n"),
    "\\toprule\n",
    "& \\multicolumn{4}{c}{Model}\\\\\\cmidrule{2-5}\n",
    "& \\multicolumn{2}{c}{{(1)}} & \\multicolumn{2}{c}{{(2)}} \\\\\\midrule\n",
    w_tab,
    "\\bottomrule\n",
    "\\end{tabular}\n",
    "\\end{adjustbox}\n",
    "\\end{table}\n")
sink()

### Save image containing all objects (entire workspace, including data and
### all intermediate results, for downstream inspection).
save.image(file = "output/hbg_replication_out.RData")