|
|
import os |
|
|
import pickle |
|
|
import sys |
|
|
from datetime import datetime |
|
|
|
|
|
import pandas as pd |
|
|
import numpy as np |
|
|
import yaml |
|
|
|
|
|
"""A one liner to serialize df's into h5 files """ |
|
|
def save_hdf(df, path):
    """Serialize a DataFrame to an HDF5 file at *path*.

    Strips a stray ".pickle" suffix and appends ".h5" when missing.
    Columns whose names contain "Date" and hold plain object values are
    parsed to datetimes and renamed with a "_date_object" suffix so that
    open_hdf can restore them. Empty frames are skipped (nothing is
    written).

    NOTE(review): mutates the caller's frame when a date column is
    converted in place — confirm callers don't rely on the original dtype.
    """
    if ".pickle" in path:
        print(f"pickle in {path}!")
        path = path.replace(".pickle", "")

    if ".h5" not in path:
        path = path + ".h5"

    if len(df) == 0:
        print("not saving b/c data frame is empty")
    else:
        date_cols = [x for x in df.columns if "Date" in x]
        for date_col in date_cols:
            # np.object was removed in numpy >= 1.24; comparing against
            # the builtin `object` is the supported equivalent.
            if df[date_col].dtype == object:
                df[date_col] = pd.to_datetime(df[date_col])
                df = df.rename(columns={date_col: date_col + "_date_object"})
        df.to_hdf(path_or_buf=path, key='df', format="table", mode='w')
|
|
|
|
|
|
|
|
def open_hdf(path):
    """Read back a DataFrame previously written by save_hdf.

    Accepts paths carrying a stray ".pickle" suffix (stripped) and
    appends ".h5" when missing. Columns renamed with "_date_object" by
    save_hdf are converted back to plain dates and given their original
    names. A zero-byte file yields an empty DataFrame.
    """
    if ".pickle" in path:
        print(f"pickle in {path}!")
        path = path.replace(".pickle", "")

    if ".h5" not in path:
        path += ".h5"

    # guard: an empty file cannot hold a valid HDF5 store
    if os.path.getsize(path) == 0:
        print("invalid file: size = 0")
        return pd.DataFrame()

    df = pd.read_hdf(path_or_buf=path, key='df', mode="r")

    # undo the date-column round-trip encoding applied by save_hdf
    encoded = [c for c in df.columns if "date_object" in c]
    for col in encoded:
        df[col] = df[col].dt.date
        df = df.rename(columns={col: col.replace("_date_object", "")})

    return df
|
|
|
|
|
""" |
|
|
saves a dict or dataframe as a pickle |
|
|
- obj: the python object |
|
|
- path: file path |
|
|
- df_bool: whether or not the object is a dataframe
|
|
- test_override: if true, then during testing, it's ok to overwrite the previous pickle |
|
|
""" |
|
|
def save_pickle(obj, path, df_bool=True, test_override=False):
    """Pickle *obj* to *path* (".pickle" appended when missing).

    - obj: the python object to serialize
    - path: file path
    - df_bool: when True, *obj* is a DataFrame; its index must be unique
      and it is stored via .to_dict() rather than as a frame
    - test_override: accepted for backward compatibility; unused here
    """
    # NOTE(review): the original loaded config_user.yaml here but never
    # used the result — a dead read that could raise FileNotFoundError
    # for no benefit. Removed.
    if ".pickle" not in path:
        path = path + ".pickle"

    if df_bool:
        # duplicate index labels would silently collide inside to_dict()
        assert len(list(obj.index)) == len(set(list(obj.index)))
        with open(path, 'wb') as outfile:
            pickle.dump(obj.to_dict(), outfile)
    else:
        # context manager guarantees the handle closes even on error
        with open(path, 'wb') as outfile:
            pickle.dump(obj, outfile)
|
|
|
|
|
def read_gz(path):
    """Read a gzip-compressed csv at *path* into a DataFrame.

    Appends ".gz" when missing. Columns whose names contain "Date" are
    parsed to datetimes; a warning is printed for any column that fails
    to parse (the column is left as-is).
    """
    if ".gz" not in path:
        path = path + ".gz"
    df = pd.read_csv(path, compression='gzip')
    date_cols = [x for x in df.columns if "Date" in x]
    for date_col in date_cols:
        try:
            # infer_datetime_format is deprecated (pandas >= 2.0);
            # to_datetime now infers the format by default.
            df[date_col] = pd.to_datetime(df[date_col])
        except (ValueError, TypeError):
            # narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt / SystemExit
            print(f"couldn't parse datetime of {date_col}")
    return df
|
|
|
|
|
"""A one liner to un-pickle df's and other stuff""" |
|
|
|
|
|
def open_pickle(path, df_bool=True):
    """Un-pickle the object stored at *path* (".pickle" appended when
    missing).

    When df_bool is True the stored object (a dict, as written by
    save_pickle) is wrapped back into a DataFrame; otherwise the raw
    object is returned.
    """
    if ".pickle" not in path:
        path = path + ".pickle"

    # context manager replaces the manual open/close; the original's
    # timing code (start/end/diff) was computed but never used — removed
    with open(path, 'rb') as infile:
        obj = pickle.load(infile)

    if df_bool:
        return pd.DataFrame(obj)
    return obj
|
|
|
|
|
|
|
|
""" Opens a df (pickled or not) if it's in path, else it creates an empty dataframe with the specified col_names""" |
|
|
|
|
|
def soft_df_open(path, obj_type="pickle", cols=None):
    """Open a DataFrame at *path* if the file exists, else return an
    empty frame with columns *cols*.

    - obj_type: "pickle" loads via open_pickle; otherwise the path must
      contain ".csv" to be read with pd.read_csv, and anything else
      exits the process (preserved from the original behavior).
    """
    # default of None avoids the mutable-default-argument pitfall;
    # behavior is identical to the old cols=[] default
    cols = [] if cols is None else cols

    # Only pickle paths get the ".pickle" suffix. The original appended
    # it unconditionally, so the ".csv" branch below always looked for a
    # nonexistent "name.csv.pickle" file.
    if obj_type == "pickle" and ".pickle" not in path:
        path = path + ".pickle"

    if os.path.isfile(path):
        if obj_type == "pickle":
            df = open_pickle(path, df_bool=True)
        elif ".csv" in path:
            df = pd.read_csv(path)
        else:
            df = ""
            print("Don't know what kind of file you want to read")
            sys.exit()
    else:
        print(f"Could not find {path}")
        df = pd.DataFrame(columns=cols)
    return df
|
|
|
|
|
""" Opens a yaml file as a dictionary""" |
|
|
def open_yaml(path):
    """Load the yaml file at *path* and return its parsed contents."""
    with open(path, 'r') as fh:
        return yaml.safe_load(fh)
|
|
|
|
|
def update_csv(df, path):
    """Append *df* to the csv at *path*, de-duplicating on the 'index'
    column (last write wins), or create the csv when it doesn't exist.

    Assumes the row identifier lives in a column literally named
    'index'.
    """
    if os.path.exists(path):
        existing = pd.read_csv(path)
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement with identical row-stacking semantics.
        updated = pd.concat([existing, df]).drop_duplicates(keep="last",
                                                            subset='index')
        # guard against schema drift between updates
        assert len(existing.columns) == len(updated.columns)
        updated.to_csv(path, index=False)
    else:
        df.to_csv(os.path.join(path), index=False)
|
|
|