# Provenance (repository-page residue, not code): anonymous-submission-acl2025, commit "add 17" (8a79f2e)
import os
import pickle
import sys
from datetime import datetime
import pandas as pd
import numpy as np
import yaml
"""A one liner to serialize df's into h5 files """
def save_hdf(df, path):
if ".pickle" in path:
print(f"pickle in {path}!")
path = path.replace(".pickle","")
if ".h5" not in path:
path = path + ".h5"
if len(df) == 0:
print("not saving b/c data frame is empty")
else:
# ensure all datetime columns are parsed correctly by adding _at suffix (see pd.read_json docs)
date_cols = [x for x in df.columns if "Date" in x]
for date_col in date_cols:
#date object
if df[date_col].dtype == np.object:
df[date_col] = pd.to_datetime(df[date_col])
df = df.rename(columns = {date_col : date_col+"_date_object"})
df.to_hdf(path_or_buf = path, key = 'df', format="table", mode = 'w')
def open_hdf(path):
    """Load a DataFrame previously written by save_hdf (HDF5 key 'df').

    - path: source path; a ".pickle" suffix is stripped and ".h5" appended
      if missing

    Columns carrying the ``_date_object`` suffix are converted back to
    plain ``datetime.date`` values and the suffix is removed. Returns an
    empty DataFrame when the target file has zero size.
    """
    if ".pickle" in path:
        print(f"pickle in {path}!")
        path = path.replace(".pickle","")
    if ".h5" not in path:
        path += ".h5"
    if os.path.getsize(path) == 0:
        print("invalid file: size = 0")
        return pd.DataFrame()
    frame = pd.read_hdf(path_or_buf = path, key= 'df' , mode= "r")
    tagged_cols = [col for col in frame.columns if "date_object" in col]
    for col in tagged_cols:
        frame[col] = frame[col].dt.date
        frame = frame.rename(columns = {col: col.replace("_date_object","")})
    return frame
"""
saves a dict or dataframe as a pickle
- obj: the python object
- path: file path
- df_bool whether or not the object is a dataframe
- test_override: if true, then during testing, it's ok to overwrite the previous pickle
"""
def save_pickle(obj, path, df_bool=True, test_override = False):
config_user = open_yaml("config_user.yaml")
#if config_user["local"]["test"]:
# if "_test" not in path:
# pass
#if test_override:
# pass
#else:
#print(f"TESTING: save this file? {path}")
#continue_ans = input("Enter (Y or N) \n ")
#if continue_ans != "Y":
# sys.exit()
if ".pickle" not in path:
path = path + ".pickle"
if df_bool == True:
assert len(list(obj.index)) == len(set(list(obj.index)))
with open(path, 'wb') as outfile:
pickle.dump(obj.to_dict(), outfile)
else:
outfile = open(path, 'wb')
pickle.dump(obj, outfile)
outfile.close()
def read_gz(path):
    """Read a gzip-compressed CSV into a DataFrame.

    - path: file path; ".gz" is appended if missing

    Columns whose name contains "Date" are parsed to datetimes; columns
    that fail to parse are left unchanged with a warning.
    """
    if ".gz" not in path:
        path = path + ".gz"
    df = pd.read_csv(path, compression='gzip')
    date_cols = [x for x in df.columns if "Date" in x]
    for date_col in date_cols:
        try:
            # infer_datetime_format was deprecated in pandas 2.0 (the
            # behavior is now the default), so it is no longer passed
            df[date_col] = pd.to_datetime(df[date_col])
        except (ValueError, TypeError):
            # narrow except: the original bare except also swallowed
            # KeyboardInterrupt / SystemExit
            print(f"couldn't parse datetime of {date_col}")
    return df
"""A one liner to un-pickle df's and other stuff"""
def open_pickle(path, df_bool=True):
if ".pickle" not in path:
path = path + ".pickle"
start = datetime.now()
#print(f"\t Reading {path}")
infile = open(path, 'rb')
obj = pickle.load(infile)
infile.close()
end = datetime.now()
diff = end-start
#print(f"\t Done reading. Took: {round(diff.seconds/60,2)} minutes")
if df_bool == True:
return pd.DataFrame(obj)
else:
return obj
""" Opens a df (pickled or not) if it's in path, else it creates an empty dataframe with the specified col_names"""
def soft_df_open(path, obj_type="pickle",cols=[]):
if ".pickle" not in path:
path = path + ".pickle"
if os.path.isfile(path) == True:
if obj_type == "pickle":
df = open_pickle(path, df_bool=True)
elif ".csv" in path:
df = pd.read_csv(path)
else:
df=""
print("Don't know what kind of file you want to read")
sys.exit()
else:
print(f"Could not find {path}")
df = pd.DataFrame(columns=cols)
return df
""" Opens a yaml file as a dictionary"""
def open_yaml(path):
with open(path, 'r') as stream:
dic = yaml.safe_load(stream)
return dic
def update_csv(df, path):
    """ appends a csv with provided df, specified in path, or creates csv with df provided
    assumes the index is in the column named 'index'

    Rows are de-duplicated on the 'index' column, keeping the newest
    (incoming) row."""
    if os.path.exists(path):
        df_norm = pd.read_csv(path)
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement with identical row-stacking semantics
        df_norm_updated = pd.concat([df_norm, df]).drop_duplicates(keep="last", subset='index')
        # a column-count mismatch means the incoming frame has a
        # different schema than the stored csv
        assert len(df_norm.columns) == len(df_norm_updated.columns)
        df_norm_updated.to_csv(path, index=False)
    else:
        # os.path.join(path) with one argument was a no-op; write directly
        df.to_csv(path, index=False)