# File size: 8,608 Bytes | revision 8a79f2e
import pandas as pd
import os
import sys
import re
import pickle
import yaml
from datetime import datetime, timedelta, timezone
import dateutil.parser
import pytz
from lib.experiment_specs import study_config
from lib.utilities import codebook
""" Purpose: cleans the iso datetimes in a dataframe column
-Input:
- DataFrame: data - raw input data that contains the time column
- col_name - the name of the column
- keep_nan : keep rows with empty value for df[col_name]
- orig_tz: when you remove the timezone adjustment, what is the timezone. if "local", then removing the timezone
yields the local time for the participant.
- Output:
-dataframe with the following new columns:
- {col_name}Datetime - in the phone's local time
- {col_name}DatetimeHour
- {col_name}Date
- {col_name}EasternDatetime - in eastern time
- {col_name}EasternDatetimeHour
"""
def clean_iso_dates(data_raw: pd.DataFrame, col_name: str, keep_nan: bool = False, orig_tz: str = "Local"):
data = data_raw.loc[data_raw[col_name].notnull()]
data[col_name + 'DatetimeTZ'] = data[col_name].apply(lambda x: dateutil.parser.parse(x).replace(microsecond=0))
# if the datetime without the timezone adjustment brings the time to local
if orig_tz == "Local":
data[col_name + 'Datetime'] = data[col_name + 'DatetimeTZ'].apply(lambda x: x.replace(tzinfo=None))
data[col_name + 'DatetimeUTC'] = data[col_name + 'DatetimeTZ'].apply(
lambda x: x.replace(tzinfo=timezone.utc) - x.utcoffset())
# if the datetime without the timezone adjustment brings the time UTC
else:
data[col_name + 'Datetime'] = data[col_name + 'DatetimeTZ'].apply(
lambda x: x.replace(tzinfo=timezone.utc) + x.utcoffset())
data[col_name + 'Datetime'] = data[col_name + 'Datetime'].apply( lambda x: x.replace(tzinfo=None))
data[col_name + 'DatetimeUTC'] = data[col_name + 'DatetimeTZ'].apply(lambda x: x.replace(tzinfo=timezone.utc))
data[col_name + 'DatetimeHour'] = data[col_name + 'Datetime'].apply(lambda x: x.replace(minute=0, second=0))
data[col_name + 'Date'] = data[col_name + 'DatetimeHour'].apply(lambda x: x.date())
# Create Col in Eastern Time
eastern = pytz.timezone('US/Eastern')
data[col_name + 'EasternDatetime'] = data[col_name + 'DatetimeUTC'].apply(
lambda x: x.astimezone(eastern).replace(tzinfo=None))
data[col_name + 'EasternDatetimeHour'] = data[col_name + 'EasternDatetime'].apply(
lambda x: x.replace(minute=0, second=0))
data = data.drop(columns=[col_name, col_name + 'DatetimeTZ', col_name + 'DatetimeUTC'])
if keep_nan:
missing = data_raw.loc[data_raw[col_name].isnull()]
data = data.append(missing)
return data
"""remove data files from directory"""
def remove_files(directory):
for file in os.listdir(directory):
file_path = os.path.join(directory, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
""" This method inputs missing start and enddatetime for survey incompletes. This helps determine what to count as use in phase,
for people that have not completed their surveys"""
def inpute_missing_survey_datetimes(df, phase):
specs = study_config.phases[phase]
old_code = specs["StartSurvey"]["Code"]
new_code = specs["EndSurvey"]["Code"]
# the missing end date will be today if the survey hasn't ended yet
missing_end_date = min(datetime.now().replace(microsecond=0), study_config.phases[phase]["EndSurvey"]["End"])
# if the end survey hasn't even been distributed yet, add end survey completion col artificially to df
if datetime.now() < study_config.phases[phase]["EndSurvey"]["Start"]:
df.loc[(df[f"{old_code}_Complete"] == "Complete") , f"{new_code}_SurveyStartDatetime"] = missing_end_date
else:
# we inpute the completion of the end survey, if they completed the start survey:
df.loc[(df[f"{old_code}_Complete"] == "Complete") &
(df[f"{new_code}_SurveyStartDatetime"].isnull()), f"{new_code}_SurveyStartDatetime"] = missing_end_date
return df
""" Adds survey code prefix to each column in the df"""
def add_survey_code(df, code):
for col in df.columns.values:
no_prefix_cols = study_config.main_cols + study_config.embedded_main_cols
if col not in no_prefix_cols:
new_name = code + "_" + col
df = df.rename(columns={col: new_name})
return df
"""A function which takes the clean_master master and outputs all the variables from a phase, without the prefixes"""
def keep_relevant_variables(df_raw, phase):
start_code = study_config.phases[phase]["StartSurvey"]["Code"]
end_code = study_config.phases[phase]["EndSurvey"]["Code"]
"""Keep participants that completed the relevant survey in the phase"""
df = df_raw.loc[df_raw[f"{start_code}_Complete"] == "Complete"].copy()
#"""LIMIT INDEX CONSTRUCTION TO FOLKS WITH CONSISTENT USE"""
#codebook_dic = pd.read_csv(codebook.main_codebook_path, index_col="VariableName").to_dict(orient='index')
## get all columns in the given phase that are also in the df
#keep_cols = [codebook.add_prefix_var(x, phase, codebook_dic) for x in codebook_dic.keys()]
#keep_cols_in_df = [x for x in keep_cols if x in df.columns]
keep_cols = [x for x in df.columns if f"{start_code}_" in x or x in study_config.main_cols+study_config.embedded_main_cols]
df = df[keep_cols]
# drop prefixes on these columns
df.columns = [x.replace(f"{start_code}_","") for x in df.columns]
return df
def add_A_to_appcode(df, appcode_col):
    """Normalize app codes to the canonical "A" + 8-digit form.

    Missing values become the string "nan"; codes read in as floats
    (e.g. 12345678.0) are truncated back to plain digits, and bare 8-digit
    codes get the "A" prefix. Adds a boolean "Check" column and asserts
    every value is either a 9-character app code or the "nan" sentinel.
    """
    # astype(str) already maps NaN to "nan", so the old fillna was a no-op.
    df[appcode_col] = df[appcode_col].astype(str)
    # Convert weird float appcodes ("12345678.0") back to integer digits.
    # startswith is safe on empty strings, unlike the old x[0] indexing.
    df[appcode_col] = df[appcode_col].apply(
        lambda x: str(int(float(x))) if (x != "nan") and not x.startswith("A") else x)
    # Add the "A" prefix to those that need it.
    df[appcode_col] = df[appcode_col].apply(lambda x: "A" + x if len(x) == 8 else x)
    # Assert we only have nans (len 3) and proper appcodes (len 9).
    df["Check"] = df[appcode_col].apply(lambda x: len(x) in (9, 3))
    assert df["Check"].all()
    return df
"returns the latest main survey that has already ended"
def get_last_survey():
last_complete_time = datetime(2018, 1, 1, 0, 0)
last_survey = ""
surveys = study_config.main_surveys
for survey in surveys:
chars = study_config.surveys[survey]
if chars["End"] < datetime.now():
if chars["End"] > last_complete_time:
last_survey = survey
last_complete_time = chars["End"]
return last_survey
# asserts two dfs that share common appcodes and columns, within a col_list
def assert_common_appcode_values(df1, df2, col_list):
    """Assert that two dataframes agree on shared columns for shared AppCodes.

    For every AppCode present in both frames, each column of col_list that
    both frames share must hold identical (string-compared) values. On a
    mismatch, diagnostics are printed and the process exits.
    NOTE(review): col_list appears to need "AppCode" in it for the merge
    below to work -- confirm with callers.
    """
    common_appcodes = set(df1["AppCode"]).intersection(set(df2["AppCode"]))
    common_columns = list(set(df1.columns).intersection(set(df2.columns)).intersection(col_list))
    compare_list = []
    for df in [df1, df2]:
        sub = df.loc[df["AppCode"].isin(common_appcodes)]
        sub = sub[common_columns]
        # Sort + stringify so row order and dtype differences don't matter.
        sub = sub.sort_values(by="AppCode").reset_index(drop=True).astype(str)
        compare_list.append(sub)
    assert len(compare_list[0]) == len(compare_list[1])
    merged = compare_list[0].merge(compare_list[1], how='outer', on='AppCode')
    for col in compare_list[0].columns:
        if col == "AppCode":
            continue
        # BUG FIX: the original evaluated .equals() inside try/except and
        # discarded the result; .equals never raises on inequality, so
        # mismatches were silently ignored. Now we actually test it.
        if not merged[col + "_x"].equals(merged[col + "_y"]):
            print(f"no match on {col}")
            print(merged[col + "_x"].dtype)
            print(merged[col + "_y"].dtype)
            print("First five rows that don't match:")
            print(merged.loc[merged[col + "_x"] != merged[col + "_y"]].head())
            sys.exit()
def merge_back_master(df_master, df_phase, phase):
    """ add prefixes to a phase specific df, and merge it to master"""
    # Load the codebook keyed by variable name, then prefix every phase column.
    codebook_dic = pd.read_csv(codebook.codebook_path, index_col="VariableName").to_dict(orient='index')
    df_phase.columns = [codebook.add_prefix_var(col, phase, codebook_dic) for col in df_phase.columns]
    # Merge in only the columns master doesn't already have (plus the key).
    cols_to_add = ["AppCode"] + list(set(df_phase.columns) - set(df_master.columns))
    df_master = df_master.merge(df_phase[cols_to_add], how='outer', left_on="AppCode", right_on="AppCode")
    return df_master