File size: 4,926 Bytes
8a79f2e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 |
import os
import pickle
import sys
from datetime import datetime
import pandas as pd
import numpy as np
import yaml
"""A one liner to serialize df's into h5 files """
def save_hdf(df, path):
    """Serialize *df* to an HDF5 file (pandas ``to_hdf``, key='df', table format).

    - df: DataFrame to save; skipped (with a message) when empty
    - path: output path; a stray ".pickle" is stripped and ".h5" appended if missing

    Columns whose name contains "Date" and hold plain objects are parsed to
    datetimes and renamed with a "_date_object" suffix so ``open_hdf`` can
    restore them as ``date`` objects on read.
    NOTE: the datetime conversion mutates the caller's DataFrame in place.
    """
    if ".pickle" in path:
        print(f"pickle in {path}!")
        path = path.replace(".pickle", "")
    if ".h5" not in path:
        path = path + ".h5"
    if len(df) == 0:
        print("not saving b/c data frame is empty")
        return
    date_cols = [c for c in df.columns if "Date" in c]
    for date_col in date_cols:
        # ``np.object`` was removed in NumPy 1.24; the builtin ``object`` is
        # the equivalent (and has always been what np.object aliased).
        if df[date_col].dtype == object:
            df[date_col] = pd.to_datetime(df[date_col])
            df = df.rename(columns={date_col: date_col + "_date_object"})
    df.to_hdf(path_or_buf=path, key='df', format="table", mode='w')
def open_hdf(path):
    """Load a DataFrame previously written by ``save_hdf``.

    - path: input path; a stray ".pickle" is stripped and ".h5" appended if missing
    Returns an empty DataFrame (with a message) when the file has size 0.
    Columns carrying the "_date_object" marker are converted back to plain
    ``date`` objects and the marker is removed from the column name.
    """
    if ".pickle" in path:
        print(f"pickle in {path}!")
        path = path.replace(".pickle", "")
    if ".h5" not in path:
        path = path + ".h5"
    if os.path.getsize(path) == 0:
        print("invalid file: size = 0")
        return pd.DataFrame()
    df = pd.read_hdf(path_or_buf=path, key='df', mode="r")
    # Undo save_hdf's "_date_object" tagging: datetime64 -> date, original name back.
    marked_cols = [c for c in df.columns if "date_object" in c]
    for col in marked_cols:
        df[col] = df[col].dt.date
        df = df.rename(columns={col: col.replace("_date_object", "")})
    return df
"""
saves a dict or dataframe as a pickle
- obj: the python object
- path: file path
- df_bool whether or not the object is a dataframe
- test_override: if true, then during testing, it's ok to overwrite the previous pickle
"""
def save_pickle(obj, path, df_bool=True, test_override=False):
    """Serialize a dict or DataFrame to a pickle file.

    - obj: the python object (a DataFrame when df_bool is True)
    - path: output path; ".pickle" is appended if missing
    - df_bool: when True, obj is pickled via ``obj.to_dict()`` and its index
      must be unique (duplicates would be silently collapsed by to_dict)
    - test_override: retained for backward compatibility; currently unused
    """
    # NOTE(review): dropped the unused ``open_yaml("config_user.yaml")`` read —
    # its result fed only commented-out test-prompt code, and the call failed
    # whenever config_user.yaml was absent.
    if ".pickle" not in path:
        path = path + ".pickle"
    if df_bool:
        assert obj.index.is_unique, "duplicate index values would be lost by to_dict()"
        with open(path, 'wb') as outfile:
            pickle.dump(obj.to_dict(), outfile)
    else:
        # context manager replaces the unclosed open()/close() pair
        with open(path, 'wb') as outfile:
            pickle.dump(obj, outfile)
def read_gz(path):
    """Read a gzip-compressed CSV into a DataFrame.

    - path: input path; ".gz" is appended if missing
    Columns whose name contains "Date" are parsed to datetimes; columns that
    fail to parse are left untouched with a warning printed.
    """
    if ".gz" not in path:
        path = path + ".gz"
    df = pd.read_csv(path, compression='gzip')
    # ``infer_datetime_format`` was deprecated (no-op) in pandas 2.0, so it is
    # dropped; pd.to_datetime infers the format on its own.
    date_cols = [c for c in df.columns if "Date" in c]
    for date_col in date_cols:
        try:
            df[date_col] = pd.to_datetime(df[date_col])
        except (ValueError, TypeError):
            # narrowed from a bare except, which also swallowed
            # KeyboardInterrupt / SystemExit
            print(f"couldn't parse datetime of {date_col}")
    return df
"""A one liner to un-pickle df's and other stuff"""
def open_pickle(path, df_bool=True):
    """Un-pickle a file written by ``save_pickle``.

    - path: input path; ".pickle" is appended if missing
    - df_bool: when True, wrap the loaded dict back into a DataFrame
    """
    if ".pickle" not in path:
        path = path + ".pickle"
    # context manager guarantees the handle is closed even if pickle.load
    # raises (the original open()/close() pair leaked on error); the dead
    # timing code that fed only commented-out prints is removed.
    with open(path, 'rb') as infile:
        obj = pickle.load(infile)
    if df_bool:
        return pd.DataFrame(obj)
    return obj
""" Opens a df (pickled or not) if it's in path, else it creates an empty dataframe with the specified col_names"""
def soft_df_open(path, obj_type="pickle", cols=None):
    """Open a DataFrame from *path* if the file exists; otherwise return an
    empty DataFrame with the given column names.

    - path: file path
    - obj_type: "pickle" to read via ``open_pickle``; otherwise a ".csv" path
      is read with ``pd.read_csv``; anything else exits with a message
    - cols: column names for the empty fallback DataFrame (default: none)
    """
    # avoid the mutable-default-argument pitfall
    if cols is None:
        cols = []
    # Only force the ".pickle" suffix when we actually intend to read a
    # pickle — the original appended it unconditionally, which made the
    # ".csv" branch look for (and read) "data.csv.pickle".
    if obj_type == "pickle" and ".pickle" not in path:
        path = path + ".pickle"
    if os.path.isfile(path):
        if obj_type == "pickle":
            df = open_pickle(path, df_bool=True)
        elif ".csv" in path:
            df = pd.read_csv(path)
        else:
            print("Don't know what kind of file you want to read")
            sys.exit()
    else:
        print(f"Could not find {path}")
        df = pd.DataFrame(columns=cols)
    return df
""" Opens a yaml file as a dictionary"""
def open_yaml(path):
    """Load the YAML file at *path* and return its contents as a dictionary."""
    with open(path, 'r') as stream:
        return yaml.safe_load(stream)
def update_csv(df, path):
    """Append *df* to the CSV at *path*, de-duplicating on the 'index' column
    (last write wins), or create the CSV from *df* if it does not exist.

    Assumes the row identifier lives in a column literally named 'index'.
    """
    if os.path.exists(path):
        existing = pd.read_csv(path)
        # DataFrame.append was removed in pandas 2.0 — pd.concat is the
        # drop-in replacement with identical row-stacking semantics.
        merged = pd.concat([existing, df]).drop_duplicates(keep="last", subset='index')
        # a column-count mismatch means the caller's schema drifted
        assert len(existing.columns) == len(merged.columns)
        merged.to_csv(path, index=False)
    else:
        # os.path.join(path) with a single argument was a no-op; dropped
        df.to_csv(path, index=False)
|