|
|
import sys |
|
|
import os |
|
|
import pandas as pd |
|
|
import random |
|
|
import re |
|
|
import string |
|
|
from functools import reduce |
|
|
import numpy as np |
|
|
import math |
|
|
|
|
|
from datetime import datetime, timedelta |
|
|
|
|
|
from lib.data_helpers import data_utils |
|
|
from lib.experiment_specs import study_config |
|
|
from lib.utilities import codebook |
|
|
from lib.utilities import serialize |
|
|
from lib.data_helpers.confidential import Confidential |
|
|
from lib.data_helpers.manual_changes import ManualChanges |
|
|
from lib.data_helpers.builder_utils import BuilderUtils |
|
|
|
|
|
# Fix the global RNG seed so any randomness used downstream is reproducible.
random.seed(23555)
|
|
|
|
|
""" |
|
|
Purpose: Cleans Qualtrics surveys based on their configs in study_config |
|
|
Input: |
|
|
- Survey Name only input survey names found in data/{study}/configurations/study_config |
|
|
- input_dir: directory of raw file |
|
|
- output_dir: directory of clean_master file |
|
|
- remove_embedded: if True, removes the embedded data from a raw qualtrics survey |
|
|
- test: bool - if True, keeps all banned email responses |
|
|
- codebook: bool - if True, the qualtrics cleaner will update the master codebook. for check_survey.py, this is set to False, and for the main pipeline, this is set to true |
|
|
- helper_paths : the dictionary specifies the absolute path locations of 'Manual Changes' an excel spreadsheet where specify |
|
|
changes to the survey answers, and 'Testers' where we specify information for the PhoneAddictionTeam |
|
|
{'ManualChanges':manual_changes_path, 'Testers':testers_path} |
|
|
|
|
|
MainFunction: clean_survey |
|
|
|
|
|
Output: Clean survey file |
|
|
|
|
|
Note: DON'T MAKE ANY NEW VARS -- THEY WILL BE DELETED BECAUSE WE CROP ALL DATA BEFORE THE FIRST VAR AND ALL DATA AFTER THE LAST VAR |
|
|
|
|
|
Note: all variables are converted to strings, and NaN will be coded as "nan" |
|
|
""" |
|
|
|
|
|
|
|
|
class CleanSurvey():
    """Cleans one raw Qualtrics survey export according to its study_config entry.

    The public entry point is clean_survey(); the remaining methods are the
    individual cleaning stages it applies in order.
    """

    # Default locations of the helper data used during cleaning; callers may
    # override these via the `helper_paths` argument of __init__.
    default_helper_paths = {"Intermediate": os.path.join("data", "external", "intermediate"),
                            "Confidential": os.path.join("data", "external", "dropbox_confidential")}
|
|
|
|
|
|
|
|
def __init__(self, survey_name: str, input_dir: str, output_dir: str,
             remove_embedded: bool = True, test: bool = False, codebook_bool: bool = True,
             helper_paths: dict = None):
    """Configure paths and options for cleaning one survey.

    Args:
        survey_name: key into study_config.surveys.
        input_dir: directory containing the raw "<survey_name>.csv".
        output_dir: directory where the clean "<survey_name>.csv" is written.
        remove_embedded: if True, strip embedded/timing data at the end of cleaning.
        test: if True, keep banned (tester) emails and skip timeframe filtering.
        codebook_bool: if True, update the master codebook while cleaning.
        helper_paths: optional dict with "Intermediate" and "Confidential"
            directories; defaults to CleanSurvey.default_helper_paths.
    """
    # Use None as the sentinel instead of sharing a mutable dict default
    # across all instances (classic mutable-default-argument pitfall).
    if helper_paths is None:
        helper_paths = self.default_helper_paths

    self.survey_name = survey_name
    self.open_date_time = study_config.surveys[survey_name]["Start"]
    self.close_date_time = study_config.surveys[survey_name]["End"]
    self.code = study_config.surveys[survey_name]["Code"]
    self.raw_file_path = os.path.join(input_dir, survey_name + ".csv")
    self.clean_file_path = os.path.join(output_dir, survey_name + ".csv")

    self.intermediate_dir = helper_paths["Intermediate"]
    self.confidential_dir = helper_paths["Confidential"]
    # Roster of tester accounts whose responses are filtered out of real runs.
    self.testers = pd.read_excel(os.path.join(self.confidential_dir, "Testers.xlsm"), sheet_name="Testers")

    self.timezones_path = os.path.join(self.intermediate_dir, "Timezones")
    self.manual_changes_path = os.path.join(self.confidential_dir, "ManualChanges.xlsx")
    self.pii_path = Confidential.id_file

    self.remove_embedded = remove_embedded
    self.test = test
    self.codebook_bool = codebook_bool
|
|
|
|
|
def clean_survey(self):
    """Run the full cleaning pipeline for this survey and write the clean CSV.

    Returns the cleaned DataFrame (empty if the raw file has no data rows).
    Text surveys are reshaped wide and returned early; all other surveys are
    deduplicated, anonymized, and optionally stripped of embedded data.
    """
    # os.path.join with a single argument is a no-op; kept as-is.
    raw_data_file = os.path.join(self.raw_file_path)
    print(f"\n Processing {self.survey_name}")
    # Read everything as strings; downstream steps rely on "nan" string coding.
    df = pd.read_csv(raw_data_file, dtype=str)

    if len(df)==0:
        print(f"No Data in{self.survey_name}")
        return pd.DataFrame()

    # The first two rows of a Qualtrics export are label/metadata rows,
    # hence the "-2" in the reported observation count.
    print(f"obs before processing {len(df)-2}")
    df = self._process_col_names(df)
    df = self._process_col_values(df)

    """ we know the qualtricts datetimes are in eastern, so we
    adjust the qualtrics times to local using the timezones found in the PD data"""
    df = self._process_datetimes(df)
    df = self._filter_timeframe(df)
    df = self._filter_emails(df)
    df = self._validate_completes(df)

    # Text surveys get a wide per-phase reshape and skip dedup/anonymization.
    if self.survey_name in study_config.text_surveys:
        df = self._reshape_text_survey(df)
        df = ManualChanges.manual_clean(df, self.survey_name, self.manual_changes_path)
        df.to_csv(self.clean_file_path, index=False)
        return df

    df = self._filter_duplicates(df)
    df = ManualChanges.manual_clean(df, self.survey_name, self.manual_changes_path)
    df = df.reset_index(drop=True)

    # Record the PII -> AppCode mapping, then strip identifying columns
    # (anonymize_cols appears to mutate df in place — its return is unused).
    Confidential.build_id_map(df,self.survey_name, self.pii_path)
    Confidential.anonymize_cols(df)

    # Prefix survey variables with this survey's code, when one is configured.
    if self.code is not None:
        df = data_utils.add_survey_code(df, self.code)

    df.to_csv(self.clean_file_path, index=False)

    """in prep for merge, remove embedded data. keep in csv exported above just for comparison"""
    if self.remove_embedded:
        df = self._remove_embedded_data(df)

    print("Created {0} with {1} entries".format(self.survey_name, len(df.index)))
    return df
|
|
|
|
|
def _process_col_names(self, df: pd.DataFrame):
    """Normalize column names, sync the codebook, and drop Qualtrics label
    rows plus any duplicated columns.
    """
    # Strip blanks and underscores out of every column name.
    for unwanted in (' ', '_'):
        df.columns = df.columns.str.replace(unwanted, '')

    df = codebook.rename_vars(df)

    if self.codebook_bool:
        self._update_codebook(df)

    # The first two rows of a Qualtrics export are label/metadata rows.
    df = df.iloc[2:, :]

    duplicated_mask = df.columns.duplicated()
    dup_names = df.loc[:, duplicated_mask].columns
    if len(list(dup_names)) > 0:
        print(f" \t Duplicate Columns to delete: {dup_names}")
    return df.loc[:, ~duplicated_mask]
|
|
|
|
|
def _process_col_values(self, df):
    """Normalize cell values: strings everywhere, cleaned phone numbers,
    back-filled AppCodes, and (for Recruitment) an AgeStrat column.
    """
    # Everything becomes a string; real NaN turns into the literal "nan".
    # NOTE(review): applymap is deprecated in pandas >= 2.1 (use DataFrame.map) — confirm pandas version.
    df = df.astype(str).applymap(lambda x: x.strip())

    # Phone-like columns: keep digits only, add a leading US "1" to 10-digit
    # numbers, and blank out anything that is not 11 digits afterwards.
    for p_col in ["PhoneNumber","PhoneNumberConfirm","FriendContact"]:
        if p_col in df.columns.values:
            df[p_col] = df[p_col].apply(lambda x: re.sub("[^0-9]", "", str(x)))
            df[p_col] = df[p_col].apply(lambda x: "1"+x if len(x)==10 else x)
            df[p_col] = df[p_col].apply(lambda x: x if len(x) == 11 else 'nan')

    # One hard-coded respondent is removed from Baseline so a downstream
    # AppCode assertion holds.
    if self.survey_name == "Baseline":
        print("\t dealing with dumb baseline bug to ensure appcode assertion passes")
        print(f"\t Len before dropping sherry {len(df)}")
        df = df.loc[df["MainEmail"]!="xy1087@nyu.edu"]
        print(f"\t len after dropping sherry {len(df)}")

    # When the survey has no AppCode column, back-fill it by merging the PII
    # map on the survey's raw email column.
    # NOTE(review): source indentation was mangled; this merge is assumed to
    # run only when AppCode is absent — confirm against version control.
    if "AppCode" not in df.columns.values:
        print("\t No AppCode in Survey")

        pii = serialize.open_pickle(self.pii_path).reset_index().rename(columns = {"index":"AppCode"})
        email_col = study_config.surveys[self.survey_name]["RawEmailCol"]
        pii = pii.loc[pii["MainEmail"] != "nan",["AppCode", "MainEmail"]].rename(columns = {"MainEmail":email_col})

        df = df.merge(pii, on=email_col, how='left')
        print("done merger")

    # Normalize appcodes ("A" prefix) and fill missing ones from ResponseID.
    for appcode_col in ["AppCode", "AppCodeConfirm"]:
        if appcode_col in df.columns.values:
            df = data_utils.add_A_to_appcode(df,appcode_col)
            "If no appcode, assign 'UNASSIGNED_' + ResponseID as Appcode"
            df.loc[df[appcode_col]=="nan", appcode_col] = 'UNASSIGNED_'+df["ResponseID"]

    # Last resort: if there is still no AppCode column, synthesize one.
    if "AppCode" not in df.columns:
        print("AppCode not in df.columns. Anonymize with Response ID")
        df["AppCode"] = 'UNASSIGNED_'+df["ResponseID"]

    # Recruitment only: derive coarse age strata before re-stringifying Age.
    if self.survey_name =="Recruitment":
        df["Age"] = df["Age"].astype(float)
        df.loc[(df["Age"]>=18)&(df["Age"]<=34),"AgeStrat"] = "18-34"
        df.loc[(df["Age"] >= 35) & (df["Age"] <= 50), "AgeStrat"] = "35-50"
        df.loc[(df["Age"] > 50), "AgeStrat"] = "50+"
        df["Age"] = df["Age"].fillna("nan").astype(str)

    return df
|
|
|
|
|
def _process_datetimes(self,df): |
|
|
df['SurveyStartEasternDatetime'] = pd.to_datetime(df['SurveyStartDatetime'], infer_datetime_format=True) |
|
|
df['SurveyEndEasternDatetime'] = pd.to_datetime(df['SurveyEndDatetime'], infer_datetime_format=True) |
|
|
df.loc[:, 'OpenEasternDateTime'] = self.open_date_time |
|
|
df.loc[:, 'CloseEasternDateTime'] = self.close_date_time |
|
|
|
|
|
|
|
|
try: |
|
|
|
|
|
timezones = serialize.open_pickle(self.timezones_path) |
|
|
df = df.merge(timezones, on = "AppCode", how = 'left') |
|
|
for time_var in ['SurveyStartEasternDatetime','SurveyEndEasternDatetime','OpenEasternDateTime','CloseEasternDateTime']: |
|
|
df[time_var.replace("Eastern","")]= df[time_var]+df["EastToLocal"] |
|
|
|
|
|
|
|
|
df.loc[df[time_var.replace("Eastern","")].isnull(),time_var.replace("Eastern","")] = df[time_var] |
|
|
df.loc[df["EastToLocal"].isnull(),"EastToLocal"] = timedelta(0) |
|
|
except: |
|
|
print("Could not merge status exporter likely because Appcode not in survey or because timezones file not in path") |
|
|
df['SurveyStartDatetime'] = pd.to_datetime(df['SurveyStartDatetime'], infer_datetime_format=True) |
|
|
df['SurveyEndDatetime'] = pd.to_datetime(df['SurveyEndDatetime'], infer_datetime_format=True) |
|
|
return df |
|
|
|
|
|
return df |
|
|
|
|
|
"""for each possible email column, drop nan's, drop banned emails, keep last of duplicate""" |
|
|
|
|
|
def _filter_emails(self, df):
    """Drop tester responses, standardize the main email column to
    "MainEmail", and remove the remaining email columns.
    """
    print(f"obs before filtering tester emails {len(df)}")
    other_email_cols = ['SchoolEmail', 'PreferEmail', 'RecipientEmail', "Email", "EmailConfirm", "Email.1"]

    if self.test:
        print("Because self.test == True, all banned emails will be included!!")
    else:
        # Remove any response whose email matches a known tester account.
        banned = list(self.testers["Email"])
        for candidate in other_email_cols + ["ParentEmail"]:
            if candidate in list(df.columns.values):
                df = df.loc[~df[candidate].isin(banned)]

    # Standardize whichever raw column holds the main email to "MainEmail".
    raw_email_col = study_config.surveys[self.survey_name]["RawEmailCol"]
    if raw_email_col != "MainEmail":
        if "MainEmail" in df.columns:
            df = df.drop(columns=["MainEmail"])
        df = df.rename(columns={raw_email_col: "MainEmail"})

    # The remaining alternate email columns are PII we no longer need.
    df = df.drop(columns=[c for c in other_email_cols if c in df.columns])
    return df
|
|
|
|
|
|
|
|
def _validate_completes(self, df):
    """Label each response's completion status in a "Complete" column.

    Finished responses with a non-"nan" CompleteQuestion answer become
    "Complete"; for all others, "Complete" is set to the name of the last
    survey question they answered (scanning backwards from LastQuestion),
    or "UnfinishedOther" if nothing qualifies.
    """
    last_question = study_config.surveys[self.survey_name]["LastQuestion"]

    # Sanity check: the last question must not be one of the id columns.
    id_cols = list(study_config.id_cols.values())
    assert last_question not in sum(id_cols, [])

    # Survey columns run from the first column through LastQuestion; scan
    # them in reverse to find the last answered one.
    question_index = list(df.columns).index(last_question)
    survey_cols = list(df.columns)[:question_index+1:]
    reverse_survey_cols = survey_cols[::-1]

    complete_q = study_config.surveys[self.survey_name]["CompleteQuestion"]
    df.loc[(df["Finished"] == 'True') & (df[complete_q] != 'nan'), "Complete"] = "Complete"
    df.loc[df["Complete"] != 'Complete', "Complete"] = "UnfinishedOther"

    # Row-by-row scan via a dict round-trip; preserves the index as dict keys.
    df_dict = df.to_dict(orient = 'index')
    for key, value in df_dict.copy().items():
        if value["Complete"] == "Complete":
            continue
        else:
            for col in reverse_survey_cols:
                # First non-"nan", non-placeholder answer (searching backwards)
                # names the column this respondent reached.
                if (value[col] != "nan"):
                    try:
                        # Skip synthesized "UNASSIGNED_<ResponseID>" values.
                        if "UNASSIGNED" not in str(value[col]):
                            df_dict[key]["Complete"] = col
                            break
                    # NOTE(review): str()/`in` should never raise here, so this
                    # handler looks unreachable — kept byte-identical.
                    except:
                        print("bug")
    df = pd.DataFrame.from_dict(df_dict, orient = "index")
    return df
|
|
|
|
|
def _filter_duplicates(self, df): |
|
|
|
|
|
ranks = range(0,len(df.columns)) |
|
|
rank_of_cols = dict(zip(list(df.columns),ranks)) |
|
|
rank_of_cols["Complete"] = 9999999 |
|
|
|
|
|
df["CompleteRank"] = df["Complete"].apply(lambda x: rank_of_cols[x]) |
|
|
|
|
|
scratch_path = os.path.join(self.intermediate_dir, "Scratch") |
|
|
|
|
|
for dup_col in ["MainEmail","AppCode"]: |
|
|
if dup_col not in df.columns: |
|
|
print(f"{self.survey_name} doesn't have an {dup_col} column") |
|
|
else: |
|
|
print(f"obs before dropping dups of {dup_col}: {len(df)}") |
|
|
|
|
|
|
|
|
df = df.sort_values(by = [dup_col,"CompleteRank","SurveyStartDatetime"]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
df = df.loc[(~df.duplicated(subset=[dup_col], keep='last')) | (df[dup_col] == "nan")] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print(f"obs after dropping dups of {dup_col}: {len(df)}") |
|
|
df = df.sort_values("SurveyStartDatetime") |
|
|
return df |
|
|
|
|
|
def _filter_timeframe(self, df): |
|
|
print(f"obs before droppingpeople that began before survey start or ended after close {len(df)}") |
|
|
if self.test == False: |
|
|
df = df.loc[df['SurveyStartEasternDatetime'] >= df['OpenEasternDateTime']] |
|
|
df = df.loc[df['SurveyEndEasternDatetime'] <= df['CloseEasternDateTime']] |
|
|
return df |
|
|
|
|
|
def _update_codebook(self, df):
    """Push this survey's variables into the master codebook, skipping the
    Qualtrics timing/browser metadata columns.
    """
    timing_markers = ["Timing-ClickCount",
                      "Timing-PageSubmit",
                      "Timing-FirstClick",
                      "Timing-LastClick",
                      "BrowserMetaInfo"]

    # Row 0 of the (uncropped) export holds each column's question label.
    entries = {col: {"VariableLabel": str(df.loc[0, col]),
                     "DataType": df.dtypes[col],
                     "PrefixEncoding": "Survey"}
               for col in df.columns}

    # Drop any variable whose label (spaces removed) matches a timing marker.
    entries = {name: chars for name, chars in entries.items()
               if not any(marker in chars["VariableLabel"].replace(" ", "")
                          for marker in timing_markers)}

    codebook.add_vardic_to_codebook(entries)
|
|
|
|
|
def _remove_embedded_data(self, df_f):
    """Drop Qualtrics timing columns and crop to the configured question
    range, keeping the key non-survey columns and restoring column order.

    (A large commented-out prefix-deduplication block from the original was
    removed as dead code.)
    """
    # Per-question timing columns are never needed downstream.
    unimportant_vars = [y for y in df_f if
                        any(x in y for x in ["FirstClick", "LastClick", "PageSubmit", "ClickCount"])]
    df_f = df_f.drop(columns=unimportant_vars)

    # Non-survey columns that must survive the crop.
    non_survey_vars_to_keep = study_config.main_cols + [f"{self.code}_{x}" for x in study_config.kept_survey_data]

    # Defensive default: if the config defines neither boundary question we
    # fall through with all remaining columns (the original would have hit a
    # NameError on an unbound `df` below).
    df = df_f
    for question_type in ["FirstQuestion", "LastQuestion"]:
        if question_type in study_config.surveys[self.survey_name]:
            # self.code is set from study_config.surveys[...]["Code"] in
            # __init__, so using it here is equivalent and consistent.
            boundary_question = self.code + "_" + \
                                study_config.surveys[self.survey_name][question_type]
            question_index = list(df_f.columns).index(boundary_question)
            if question_type == "FirstQuestion":
                keep_cols = list(set(non_survey_vars_to_keep + list(df_f.columns)[question_index:]))
            else:
                keep_cols = list(set(non_survey_vars_to_keep + list(df_f.columns)[:question_index + 1]))
            # NOTE(review): each iteration crops from df_f, so when both
            # boundary questions are configured the LastQuestion crop
            # discards the FirstQuestion crop — confirm this is intended.
            df = df_f[[x for x in df_f.columns if x in keep_cols]]

    # Non-survey columns first, then surviving survey columns in original order.
    var_order = [x for x in non_survey_vars_to_keep if x in df_f.columns] + [x for x in df_f.columns if x not in non_survey_vars_to_keep]
    kept_var_order = [x for x in var_order if x in df.columns]
    df = df[kept_var_order]

    return df
|
|
|
|
|
def _reshape_text_survey(self,df):
    """Reshape a long text survey into one row per AppCode, with one column
    per study phase for each kept variable.
    """
    # Label each response with its study phase based on its start date.
    df["SurveyStartDate"] = df["SurveyStartDatetime"].dt.date
    df = BuilderUtils.add_phase_label(raw_df = df,raw_df_date="SurveyStartDate", start_buffer=0, end_buffer=-1)

    # Replace phase names with the phase's StartSurvey code (unknown phases
    # pass through unchanged).
    codes = [study_config.phases[x]["StartSurvey"]["Code"] for x in list(study_config.phases.keys())]
    rename_dic = dict(zip(list(study_config.phases.keys()), codes))
    df["Phase"] = df["Phase"].apply(lambda x: rename_dic[x] if x in rename_dic else x)

    # Pivot wide: one row per AppCode, first response per phase wins.
    keep_vars =[study_config.surveys[self.survey_name]["CompleteQuestion"],"SurveyStartDatetime","SurveyEndDatetime","Complete"]
    df_p = df.pivot_table(index=["AppCode"],
                          values=keep_vars,
                          columns=["Phase"],
                          aggfunc='first')

    # Flatten the (variable, phase) MultiIndex into "<phase>_<code><variable>".
    df_p.columns = [f'_{self.code}'.join(col[::-1]).strip() for col in df_p.columns.values]
    df_p = df_p.reset_index()

    return df_p