import os
import sys
import shutil
import zipfile
import multiprocessing
import pandas as pd
from datetime import datetime, timedelta

from lib.data_helpers import data_utils
from lib.experiment_specs import study_config
from lib.data_helpers import test
from lib.data_helpers.builder_utils import BuilderUtils
from lib.data_helpers.gaming import Gaming

from lib.utilities import serialize

class PullEvents:

    port_dir = {"PCDashboard": os.path.join("data","external","dropbox","..","PCPort"),
                "PhoneDashboard":os.path.join("data","external","dropbox","..","PhoneDashboardPort")}

    # identifying_cols is the list of dataframe columns that uniquely identify a row; none of them can be empty
    def __init__(self, source: str, keyword: str, scratch: bool, test: bool,
                 time_cols: list, raw_timezone: str, appcode_col: str, identifying_cols: list, sort_cols: list,
                 drop_cols: list, cat_cols: list,
                 compress_type: str, processing_func = BuilderUtils.default_puller_process, file_reader = None):
        """

        Purpose
        -------

        This class appends the raw data files housed in self.zipped_directory, cleans them a bit with the self.processing func,
        and saves the merged data file in self.raw_file. Unless self.scratch == True, the puller will only process new raw data files
        and appends them to the dataframe saved in self.raw_file. The puller documents which files have been processed in the config located in self.config_file_path

        WARNING: if self.scratch==TRUE, it may take hours or days to reprocess all the data. Call Michael to discuss if this needs to happen.

        Parameters
        ----------
        source - source the app data source (either PC or PD)
        keyword - the kind of data coming in (e.g. Snooze, Use etc)
        scratch - whether or not to start data processing from scratch
        test - whether or not to test the pulling pipeline (it will reprocess that latest three zipfiles) (I wouldn't play with this)
        time_cols - list of columns that contain datetimes
        raw_timezone - the timezone of the raw data
        appcode_col - the raw column name containing the appcodes
        identifying_cols - list of columns that should identify a unique row
        sort_cols - list of columns that describes how the df should be ordered. The df will be sorted from smallest to largest value of the sort_cols
        drop_cols - list of columns that appear in the raw zipfile or after zipfile processing that should be dropped
        cat_cols - list of columns that can be converted into the categorical data type
        compress_type - how the zipfiles are compressed (either "folder", "csv", or "txt")
        processing_func - the function used to process the raw data (e.g. clean_master dates)
        file_reader - function that reads in the raw file into memory. only required for file stored in zip folders.
        """

        self.source = source
        self.keyword = keyword
        self.scratch = scratch
        self.pull_test = test
        self.time_cols = time_cols
        self.raw_timezone = raw_timezone
        self.appcode_col = appcode_col
        self.identifying_cols = identifying_cols
        self.sort_cols = sort_cols
        self.drop_cols = drop_cols
        self.cat_cols = cat_cols
        self.compress_type = compress_type
        self.processing_func = processing_func
        self.file_reader = file_reader


        self.zipped_directory = os.path.join("data","external", "input", source, f"Raw{self.keyword}Zipped")
        self.buggy_dir = os.path.join("data","external", "input", source, "BuggyFiles")
        self.config_file_path = os.path.join("data","external", "input", source, f"{self.source}{self.keyword}Config.csv")
        self.raw_file = os.path.join("data","external", "input", source, f"Raw{self.keyword}")
        self.test_file = os.path.join("data","external", "input_test", source, f"Raw{self.keyword}")

        self.config_user_dict = serialize.open_yaml("config_user.yaml")

        self.builder_utils = BuilderUtils()
        self.added_config = self.builder_utils.get_config(self.config_file_path)  # dict of zipfiles already merged into raw_file
        self.new_config = {}


    def update_data(self):
        print(f"\n Updating {self.keyword} data! {datetime.now()}")
        if self.config_user_dict["local"]["test"]:
            df = serialize.soft_df_open(self.test_file)
            df = self._memory_redux(df)
            print(df.memory_usage(deep=True) / 1024 ** 2)
            return df

        else:
            self.builder_utils.transport_new_zip_files(PullEvents.port_dir[self.source], self.keyword, self.zipped_directory)
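            # a zip is "new" if its basename (without .zip) is not yet recorded in the processed-file config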
            new_zip_files = [x for x in os.listdir(self.zipped_directory) if (x.replace(".zip", "") not in self.added_config) & (".zip" in x)]
            print(f"new zip files {len(new_zip_files)}")


            # if this is not a scratch or test run and there is no new data, just load the old data
            if (not self.scratch) and (len(new_zip_files) == 0) and (not self.pull_test):
                print("\t No New Data! Loading pickled raw data into memory")

                if os.path.exists(self.raw_file + ".pickle") and os.path.getsize(self.raw_file + ".pickle") > 0:
                    df = serialize.soft_df_open(self.raw_file)
                else:
                    print("Raw file is empty. Reading Backup Compression Zip")
                    df = serialize.read_gz(self.raw_file+".gz")
                df = self._memory_redux(df)

                # For Use/Alternative data, keep only rows from the day before the Baseline survey onward
                # (the recruitment-phase archiving step below is currently disabled)
                if self.keyword in ("Alternative", "Use"):
                    #self._archive_recruitment(df)
                    df = df.loc[df["CreatedDatetimeHour"] >= study_config.surveys["Baseline"]["Start"] - timedelta(1)]

                return df

            else:
                print(f"Length of new zip files {len(new_zip_files)}")
                df = self.aggregate_data(new_zip_files)
                self.builder_utils.update_config(self.added_config,
                                                 self.new_config,
                                                 self.config_file_path)
                return df

    def aggregate_data(self, new_zip_files):
        old_df, new_zip_files = self._configure_data(new_zip_files)

        print(f"\t Going to add {len(new_zip_files)} files:")
        if self.config_user_dict['local']["parallel"] == False:
            df_dic = self._process_zips(new_zip_files)

        # if the number of new files exceeds the number of cores, multiprocess
        elif multiprocessing.cpu_count() < len(new_zip_files):
            df_dic = self._process_zips_mp(new_zip_files)

        else:
            print(f"not multiprocessing b/c number of files {len(new_zip_files)} <= number of cores {multiprocessing.cpu_count()}")
            df_dic = self._process_zips(new_zip_files)

        #get appcodes to keep
        print("\t get data survey")
        cl_file = os.path.join("data", "external", "dropbox_confidential", "ContactLists", "Used",
                               study_config.kept_appcode_cl)
        a_df = pd.read_csv(cl_file)
        a_df = data_utils.add_A_to_appcode(a_df,"AppCode")
        appcodes = list(a_df["AppCode"])
        print(len(appcodes))
        assert len(appcodes) == study_config.number_of_kept_appcodes

        # combine new with old and keep relevant appcodes
        if len(df_dic)>0:
            print("\t adding new data to master")
            new_df = pd.concat(list(df_dic.values()), sort=True)
            new_df = new_df.loc[new_df["AppCode"].isin(appcodes)]
            parent_df = pd.concat([old_df,new_df], sort=True)
        else:
            print("\t no new data!")
            parent_df = old_df

        # minimal cleaning and save to disk: sort from smallest to largest values of sort_cols
        print(f"\t Len before duplicate drop {len(parent_df)}")
        parent_df = parent_df.sort_values(by = self.sort_cols)
        parent_df = parent_df.drop_duplicates(subset = self.identifying_cols, keep = 'last')
        print(f"\t Len after duplicate drop {len(parent_df)}")

        # drop stray "Unnamed" index columns (typically produced when a csv was saved with its index)
        for col in parent_df.columns:
            if "Unnamed" in col:
                parent_df = parent_df.drop(columns=[col])
        parent_df = parent_df.reset_index(drop=True)
        print(parent_df.memory_usage(deep=True) / 1024 ** 2)

        try:
            os.rename(self.raw_file + ".pickle", self.raw_file + "PrevRun.pickle")
        except OSError:
            print("could not find old pickle file")

        try:
            serialize.save_pickle(parent_df, self.raw_file)
        except Exception:
            print("Failed to save pickle!")

        try:
            test.save_test_df(parent_df, self.test_file)
        except Exception:
            print("Failed to save testfile")

        # don't wrap the backup write below in try/except: if the backup fails, we want the run
        # to fail so the new files get reprocessed on the next run
        try:
            os.rename(self.raw_file + ".gz", self.raw_file + "PrevRun.gz")
        except OSError:
            print("no old gz file")
        parent_df.to_csv(self.raw_file + '.gz', index=False, compression='gzip')

        print(f"\t Done Saving {datetime.now()}")

        # add new guys to config in memory, now that everything has saved
        for zip_file, df in df_dic.items():
            try:
                latest_hour = str(df[self.time_cols[0]].max())
                earliest_hour = str(df[self.time_cols[0]].min())
            except (KeyError, IndexError):
                latest_hour = "NAN"
                earliest_hour = "NAN"
            self.new_config[zip_file.replace(".zip", "")] = {"EarliestHour": earliest_hour,
                                                             "LatestHour": latest_hour,
                                                             "ZipFile": zip_file}
        return parent_df

    """ modifes the reaggregation in the following way:
     - if self.scatch is True: all zipfiles in data dire are new, and old_df is empty
     - if self.test is True: reprocess the last three data files, and old_df is reloaded
     - else: process the new zip files with old df"""
    def _configure_data(self,new_zip_files):
        if self.scratch:
            all_zip_files = [x for x in os.listdir(self.zipped_directory) if ".zip" in x]
            new_zip_files = sorted(all_zip_files)

            #continue_ans = input("Warning: Do you want to reprocess all use files? Enter (Y or N) \n ")
            continue_ans = "Y"
            if continue_ans == "N":
                sys.exit()
            print(f"DELETING ADDED EVENT CONFIG, CAUSING REAGGREGATION OF ALL {self.keyword} ZIPFILES")
            self.added_config = {}
            if self.keyword == "Use":
                print("\t Also Deleting all Granular Gaming Files")
                for gran_folder in [os.path.join(Gaming.gaming_dir, "FirstLast"),
                                    os.path.join(Gaming.gaming_dir, "Granular")]:
                    shutil.rmtree(gran_folder)
                    os.mkdir(gran_folder)
            old_df = pd.DataFrame()

        else:
            if self.pull_test:
                if len(new_zip_files) != 0:
                    print("There are actual new zip files to process! First process these files, then test")
                    sys.exit()

                print("TEST: Reprocessing 3 files from zipped directory")
                test_files = sorted(x for x in os.listdir(self.zipped_directory) if ".zip" in x)[-3:]
                for zip_file in test_files:
                    del self.added_config[zip_file.replace(".zip", "")]
                new_zip_files = test_files

            if not os.path.exists(self.raw_file + ".pickle"):
                old_df = pd.DataFrame()

            elif os.path.getsize(self.raw_file + ".pickle") > 0:
                old_df = serialize.soft_df_open(self.raw_file)
            else:
                print("Raw file is empty. Reading Backup Compression Zip")
                old_df = serialize.read_gz(self.raw_file + ".gz")

            if len(old_df) > 0:
                old_df = self._memory_redux(old_df)

            # For Use/Alternative data, keep only rows from the day before the Baseline survey onward
            if self.keyword in ("Alternative", "Use"):
                #self._archive_recruitment(old_df)
                old_df = old_df.loc[old_df["CreatedDatetimeHour"] >= study_config.surveys["Baseline"]["Start"] - timedelta(1)]

        return old_df, new_zip_files

    def _process_zips_mp(self, new_zip_files: list):

        # create the pool object for multiprocessing
        pool = multiprocessing.Pool(processes=study_config.cores)

        # split the files to add into n lists where n = cores
        chunks = [new_zip_files[i::study_config.cores] for i in range(study_config.cores)]
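        # round-robin split, e.g. with 3 cores: chunk 0 gets files 0,3,6,..., chunk 1 gets 1,4,7,..., chunk 2 gets 2,5,8,...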

        print(f"Multiprocessing with {study_config.cores} cpus")
        df_list_of_dics = pool.map(func=self._process_zips, iterable=chunks)
        pool.close()
        pool.join()
        print("Done Multiprocessing")

        # flatten the list of dictionaries into one {zip_file: df} dict
        df_dic = {}
        for d in df_list_of_dics:
            df_dic.update(d)
        return df_dic

    def _process_zips(self, new_zip_files):
        df_dic = {}
        # enumerate so the progress counter advances even when an empty df is skipped
        for i, zip_file in enumerate(new_zip_files, start=1):
            print(f"processing {zip_file} ({round((i * 100) / len(new_zip_files))}%)", end="\r")
            df = self._process_zip(zip_file)
            if len(df) == 0:
                continue
            df_dic[zip_file] = df
        return df_dic

    def _process_zip(self, zip_file):
        if ".zip" not in zip_file:
            return pd.DataFrame()

        # open zipfile
        if self.compress_type != "folder":
            df, problem = self._open_zipfile(zip_file)

        # open json zip folder
        else:
            df = self._open_zipfolder(zip_file)
            problem = ""

        if len(df) == 0:
            if problem != "Corrupt":
                problem = "Empty"
                print(f"{zip_file} is empty ...archiving in {self.buggy_dir}/{problem}")

            # make sure the archive subfolder exists before moving the file
            os.makedirs(os.path.join(self.buggy_dir, problem), exist_ok=True)
            old_file = os.path.join(self.zipped_directory, zip_file)
            new_file = os.path.join(self.buggy_dir, problem, zip_file)
            os.rename(old_file, new_file)
            return df

        df.columns = df.columns.str.replace(' ', '')
        df = df.rename(columns={self.appcode_col: "AppCode"})
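        # prefix numeric appcodes with "A", e.g. "12345" -> "A12345"; non-numeric codes (and the string "nan") pass through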
        df["AppCode"] = df["AppCode"].astype(str).apply(lambda x: "A" + x if x != "nan" and x.isnumeric() else x)
        df = self.processing_func(df, zip_file, self)

        df["Zipfile"] = zip_file
        df = self._memory_redux(df)

        return df

    def _open_zipfile(self, zip_file):
        if self.compress_type == "txt":
            separator = '\t'
        elif self.compress_type == "csv":
            separator = ','
        else:
            print("Illegal separator")
            sys.exit()
        try:
            df = pd.read_csv(os.path.join(self.zipped_directory, zip_file), compression='zip', header=0,
                             sep=separator, quotechar='"')
            problem = "None"
        except Exception:
            print(f"{zip_file} is corrupt. Investigate in archive")
            df = pd.DataFrame()
            problem = "Corrupt"
        return df, problem

    def _open_zipfolder(self, zip_folder):
        temp_folder = os.path.join(self.zipped_directory, zip_folder.replace(".zip", "FOLDER"))
        # extract everything to a temporary folder, read each file, then clean up
        with zipfile.ZipFile(os.path.join(self.zipped_directory, zip_folder)) as zip_ref:
            zip_ref.extractall(path=temp_folder)
        df_list = []
        for file in os.listdir(temp_folder):
            file_path = os.path.join(temp_folder, file)
            clean_data = self.file_reader(file_path)
            df_list.append(clean_data)
        shutil.rmtree(temp_folder)
        df = pd.concat(df_list)
        return df

    def _open_zipfolders_install(self, zip_folders):
        """for recovering install data"""
        for zip_folder in zip_folders:
            self._open_zipfolder(zip_folder)

    def _memory_redux(self, df):
        # drop columns that were initially in raw data or are a processing byproduct
        for drop_col in self.drop_cols + ["Server", "AsOf"]:
            if drop_col in df.columns.values:
                df = df.drop(columns=[drop_col])

        # make columns categorical to shrink the memory footprint
        for cat_col in self.cat_cols + ["Zipfile"]:
            if cat_col in df.columns.values:
                try:
                    df[cat_col] = df[cat_col].astype('category')
                except Exception:
                    print(f"\t categorizing {cat_col} failed!")

        return df

    def _archive_recruitment(self, df):
        if not os.path.exists(self.raw_file + "RecruitmentPhase.pickle"):
            r_df_raw = df.loc[df["CreatedDatetimeHour"] < study_config.surveys["Baseline"]["Start"] - timedelta(1)]
            try:
                serialize.save_pickle(r_df_raw, self.raw_file + "RecruitmentPhase.pickle")
            except Exception:
                r_df_raw.to_csv(self.raw_file + 'RecruitmentPhase.gz', index=False, compression='gzip')
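
# A minimal usage sketch (hypothetical, for illustration only): the argument values
# below are assumptions, and the run presumes the data/external/input/PCDashboard
# layout, config_user.yaml, and the study_config constants referenced above all exist.
if __name__ == "__main__":
    puller = PullEvents(source="PCDashboard",
                        keyword="Use",
                        scratch=False,
                        test=False,
                        time_cols=["CreatedDatetimeHour"],
                        raw_timezone="UTC",
                        appcode_col="AppCode",
                        identifying_cols=["AppCode", "CreatedDatetimeHour"],
                        sort_cols=["AppCode", "CreatedDatetimeHour"],
                        drop_cols=[],
                        cat_cols=["AppCode"],
                        compress_type="csv")
    use_df = puller.update_data()
    print(use_df.shape)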