# Source: anonymous-submission-acl2025 — commit "add 17" (8a79f2e)
import sys
from lib.experiment_specs import study_config
from lib.data_helpers import data_utils
from lib.data_helpers import test
from lib.utilities import codebook
from lib.utilities import serialize
from functools import reduce
import pandas as pd
import os
from datetime import datetime, timedelta
from lib.data_helpers.builder_utils import BuilderUtils
"""
Object that cleans phone dashboard and PC Dashbaord data, by doing the following:
"""
class CleanEvents:
    """
    Cleans PhoneDashboard and PCDashboard event data: subsets rows to study
    appcodes/dates, adds phase labels, optionally builds phase-level summaries,
    and saves the cleaned output.
    """

    def __init__(self, source: str, keyword: str):
        """
        Establishes input/output paths for one data source and keyword.

        Parameters
        ----------
        source: either "PhoneDashboard" or "PCDashboard"
        keyword: the kind of [PhoneDashboard] data, like "Use" or "Alternative"
        """
        # Phase-level summary CSV written at the end of clean_events().
        self.user_event_file = os.path.join("data","external", "intermediate", source, f"{keyword}Summary.csv")
        # Destination for the pickled row-level cleaned data (production run).
        self.clean_file = os.path.join("data", "external", "intermediate", source, f"{keyword}")
        # Destination used instead when running in test mode.
        self.clean_test_file = os.path.join("data","external", "intermediate_test", source, f"{keyword}")
        self.keyword = keyword
        # User-local config; ["local"]["test"] toggles test mode below.
        self.config_user_dict = serialize.open_yaml("config_user.yaml")

    def clean_events(self, raw_event_df: pd.DataFrame, date_col: str, cleaner, phase_data: bool = True):
        """
        Cleans raw event data and optionally builds phase-level data.

        - subsets data: only appcodes in study, within the study dates
        - adds phase label
        - creates phase level data
        - applies specific cleaning functions given as input

        Parameters
        ----------
        raw_event_df: a dataframe that contains raw PD or PC data
        date_col: the column name that gives the date of the row, used for dividing rows into phases
        cleaner: custom cleaner object exposing prep_clean(df) and phase_clean(df, phase)
        phase_data: whether or not the cleaner should create phase level data

        Returns
        -------
        (p_df, df_clean) when phase_data is True — the merged phase-level frame
        and the row-level cleaned frame; otherwise just df_clean.
        """
        print(f"\t Cleaning {self.keyword} {datetime.now()}")
        print(len(raw_event_df))
        df_clean = cleaner.prep_clean(raw_event_df)
        print(f"{len(df_clean)}: After Clean")
        # Keep only rows that fall inside the study window.
        df_clean = df_clean.loc[(df_clean[date_col] >= study_config.first_pull.date()) & (df_clean[date_col] <= study_config.last_pull.date())]
        df_clean = BuilderUtils.add_phase_label(raw_df = df_clean, raw_df_date = date_col)
        print(f"Length of file before saving {len(df_clean)}")
        print(df_clean.memory_usage(deep=True)/1024**2)
        if self.config_user_dict["local"]["test"]:
            test.save_test_df(df_clean, self.clean_test_file)
        else:
            try:
                serialize.save_pickle(df_clean, self.clean_file)
            # Narrowed from a bare `except:` (which also caught KeyboardInterrupt);
            # saving stays best-effort, but the failure reason is now reported.
            except Exception as err:
                print(f"Couldn't save pickle! ({err})")
        if not phase_data:
            return df_clean
        df_list = []
        for phase, specs in study_config.phases.items():
            # We have to wait two days to begin collecting new phase data because
            # phase use doesn't start until a day after survey launch, and that
            # data isn't sent to dropbox until the day after.
            if datetime.now() < specs["StartSurvey"]["Start"] + timedelta(2):
                continue
            old_code = specs["StartSurvey"]["Code"]
            df_p = df_clean.loc[df_clean["Phase"] == phase]
            df_p = cleaner.phase_clean(df_p, phase)
            df_p = data_utils.add_survey_code(df_p, old_code)
            df_list.append(df_p)
        # Guard: if every phase was skipped by the two-day wait, reduce() over an
        # empty list would raise TypeError; return an empty frame instead.
        if not df_list:
            return pd.DataFrame(), df_clean
        p_df = reduce(lambda left, right: pd.merge(left, right, how="outer", on="AppCode"), df_list)
        if not self.config_user_dict["local"]["test"]:
            p_df.to_csv(self.user_event_file, index=False)
        print(f"\t Done Cleaning {datetime.now()}")
        return p_df, df_clean