| | import numpy as np |
| | import pandas as pd |
| | from tqdm import tqdm |
| | from datetime import datetime |
| | import pickle |
| | import datetime |
| | import os |
| | import sys |
| | from pathlib import Path |
# Make the repository root importable (two levels above this file).
# NOTE: the original used string concatenation ("... + './../..'"), which fuses
# a stray '.' onto the last path component and yields a non-existent entry;
# os.path.join builds the intended path.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
# Ensure the output directory for the pickled data dictionaries exists.
os.makedirs("./data/dict", exist_ok=True)
| |
|
| | class Generator(): |
| | def __init__(self,cohort_output,if_mort,if_admn,if_los,feat_cond,feat_lab,feat_proc,feat_med,impute,include_time=24,bucket=1,predW=0): |
| | self.impute=impute |
| | self.feat_cond,self.feat_proc,self.feat_med,self.feat_lab = feat_cond,feat_proc,feat_med,feat_lab |
| | self.cohort_output=cohort_output |
| | |
| | self.data = self.generate_adm() |
| | print("[ READ COHORT ]") |
| | self.generate_feat() |
| | print("[ READ ALL FEATURES ]") |
| | if if_mort: |
| | print(predW) |
| | self.mortality_length(include_time,predW) |
| | print("[ PROCESSED TIME SERIES TO EQUAL LENGTH ]") |
| | elif if_admn: |
| | self.readmission_length(include_time) |
| | print("[ PROCESSED TIME SERIES TO EQUAL LENGTH ]") |
| | elif if_los: |
| | self.los_length(include_time) |
| | print("[ PROCESSED TIME SERIES TO EQUAL LENGTH ]") |
| | self.smooth_meds(bucket) |
| | |
| | |
| | |
| | |
| | |
| | |
| | print("[ SUCCESSFULLY SAVED DATA DICTIONARIES ]") |
| | |
| | def generate_feat(self): |
| | if(self.feat_cond): |
| | print("[ ======READING DIAGNOSIS ]") |
| | self.generate_cond() |
| | if(self.feat_proc): |
| | print("[ ======READING PROCEDURES ]") |
| | self.generate_proc() |
| | if(self.feat_med): |
| | print("[ ======READING MEDICATIONS ]") |
| | self.generate_meds() |
| | if(self.feat_lab): |
| | print("[ ======READING LABS ]") |
| | self.generate_labs() |
| | |
| | |
| | def generate_adm(self): |
| | data=pd.read_csv(f"./data/cohort/{self.cohort_output}.csv.gz", compression='gzip', header=0, index_col=None) |
| | data['admittime'] = pd.to_datetime(data['admittime']) |
| | data['dischtime'] = pd.to_datetime(data['dischtime']) |
| | data['los']=pd.to_timedelta(data['dischtime']-data['admittime'],unit='h') |
| | data['los']=data['los'].astype(str) |
| | data[['days', 'dummy','hours']] = data['los'].str.split(' ', -1, expand=True) |
| | data[['hours','min','sec']] = data['hours'].str.split(':', -1, expand=True) |
| | data['los']=pd.to_numeric(data['days'])*24+pd.to_numeric(data['hours']) |
| | data=data.drop(columns=['days', 'dummy','hours','min','sec']) |
| | data=data[data['los']>0] |
| | data['Age']=data['Age'].astype(int) |
| | return data |
| | |
| | def generate_cond(self): |
| | cond=pd.read_csv("./data/features/preproc_diag.csv.gz", compression='gzip', header=0, index_col=None) |
| | cond=cond[cond['hadm_id'].isin(self.data['hadm_id'])] |
| | cond_per_adm = cond.groupby('hadm_id').size().max() |
| | self.cond, self.cond_per_adm = cond, cond_per_adm |
| | |
| | def generate_proc(self): |
| | proc=pd.read_csv("./data/features/preproc_proc.csv.gz", compression='gzip', header=0, index_col=None) |
| | proc=proc[proc['hadm_id'].isin(self.data['hadm_id'])] |
| | proc[['start_days', 'dummy','start_hours']] = proc['proc_time_from_admit'].str.split(' ', -1, expand=True) |
| | proc[['start_hours','min','sec']] = proc['start_hours'].str.split(':', -1, expand=True) |
| | proc['start_time']=pd.to_numeric(proc['start_days'])*24+pd.to_numeric(proc['start_hours']) |
| | proc=proc.drop(columns=['start_days', 'dummy','start_hours','min','sec']) |
| | proc=proc[proc['start_time']>=0] |
| | |
| | |
| | proc=pd.merge(proc,self.data[['hadm_id','los']],on='hadm_id',how='left') |
| | proc['sanity']=proc['los']-proc['start_time'] |
| | proc=proc[proc['sanity']>0] |
| | del proc['sanity'] |
| | |
| | self.proc=proc |
| | |
| | def generate_labs(self): |
| | chunksize = 10000000 |
| | final=pd.DataFrame() |
| | for labs in tqdm(pd.read_csv("./data/features/preproc_labs.csv.gz", compression='gzip', header=0, index_col=None,chunksize=chunksize)): |
| | labs=labs[labs['hadm_id'].isin(self.data['hadm_id'])] |
| | labs[['start_days', 'dummy','start_hours']] = labs['lab_time_from_admit'].str.split(' ', -1, expand=True) |
| | labs[['start_hours','min','sec']] = labs['start_hours'].str.split(':', -1, expand=True) |
| | labs['start_time']=pd.to_numeric(labs['start_days'])*24+pd.to_numeric(labs['start_hours']) |
| | labs=labs.drop(columns=['start_days', 'dummy','start_hours','min','sec']) |
| | labs=labs[labs['start_time']>=0] |
| |
|
| | |
| | labs=pd.merge(labs,self.data[['hadm_id','los']],on='hadm_id',how='left') |
| | labs['sanity']=labs['los']-labs['start_time'] |
| | labs=labs[labs['sanity']>0] |
| | del labs['sanity'] |
| | |
| | if final.empty: |
| | final=labs |
| | else: |
| | final=final.append(labs, ignore_index=True) |
| |
|
| | self.labs=final |
| | |
| | def generate_meds(self): |
| | meds=pd.read_csv("./data/features/preproc_med.csv.gz", compression='gzip', header=0, index_col=None) |
| | meds[['start_days', 'dummy','start_hours']] = meds['start_hours_from_admit'].str.split(' ', -1, expand=True) |
| | meds[['start_hours','min','sec']] = meds['start_hours'].str.split(':', -1, expand=True) |
| | meds['start_time']=pd.to_numeric(meds['start_days'])*24+pd.to_numeric(meds['start_hours']) |
| | meds[['start_days', 'dummy','start_hours']] = meds['stop_hours_from_admit'].str.split(' ', -1, expand=True) |
| | meds[['start_hours','min','sec']] = meds['start_hours'].str.split(':', -1, expand=True) |
| | meds['stop_time']=pd.to_numeric(meds['start_days'])*24+pd.to_numeric(meds['start_hours']) |
| | meds=meds.drop(columns=['start_days', 'dummy','start_hours','min','sec']) |
| | |
| | meds['sanity']=meds['stop_time']-meds['start_time'] |
| | meds=meds[meds['sanity']>0] |
| | del meds['sanity'] |
| | |
| | meds=meds[meds['hadm_id'].isin(self.data['hadm_id'])] |
| | meds=pd.merge(meds,self.data[['hadm_id','los']],on='hadm_id',how='left') |
| |
|
| | |
| | meds['sanity']=meds['los']-meds['start_time'] |
| | meds=meds[meds['sanity']>0] |
| | del meds['sanity'] |
| | |
| | meds.loc[meds['stop_time'] > meds['los'],'stop_time']=meds.loc[meds['stop_time'] > meds['los'],'los'] |
| | del meds['los'] |
| | |
| | meds['dose_val_rx']=meds['dose_val_rx'].apply(pd.to_numeric, errors='coerce') |
| | |
| | |
| | self.meds=meds |
| | |
| | |
| | def mortality_length(self,include_time,predW): |
| | self.los=include_time |
| | self.data=self.data[(self.data['los']>=include_time+predW)] |
| | self.hids=self.data['hadm_id'].unique() |
| | |
| | if(self.feat_cond): |
| | self.cond=self.cond[self.cond['hadm_id'].isin(self.data['hadm_id'])] |
| | |
| | self.data['los']=include_time |
| | |
| | if(self.feat_med): |
| | self.meds=self.meds[self.meds['hadm_id'].isin(self.data['hadm_id'])] |
| | self.meds=self.meds[self.meds['start_time']<=include_time] |
| | self.meds.loc[self.meds.stop_time >include_time, 'stop_time']=include_time |
| | |
| | |
| | |
| | if(self.feat_proc): |
| | self.proc=self.proc[self.proc['hadm_id'].isin(self.data['hadm_id'])] |
| | self.proc=self.proc[self.proc['start_time']<=include_time] |
| | |
| | |
| | if(self.feat_lab): |
| | self.labs=self.labs[self.labs['hadm_id'].isin(self.data['hadm_id'])] |
| | self.labs=self.labs[self.labs['start_time']<=include_time] |
| | |
| | |
| | self.los=include_time |
| | |
| | def los_length(self,include_time): |
| | self.los=include_time |
| | self.data=self.data[(self.data['los']>=include_time)] |
| | self.hids=self.data['hadm_id'].unique() |
| | |
| | if(self.feat_cond): |
| | self.cond=self.cond[self.cond['hadm_id'].isin(self.data['hadm_id'])] |
| | |
| | self.data['los']=include_time |
| | |
| | if(self.feat_med): |
| | self.meds=self.meds[self.meds['hadm_id'].isin(self.data['hadm_id'])] |
| | self.meds=self.meds[self.meds['start_time']<=include_time] |
| | self.meds.loc[self.meds.stop_time >include_time, 'stop_time']=include_time |
| | |
| | |
| | |
| | if(self.feat_proc): |
| | self.proc=self.proc[self.proc['hadm_id'].isin(self.data['hadm_id'])] |
| | self.proc=self.proc[self.proc['start_time']<=include_time] |
| | |
| | |
| | if(self.feat_lab): |
| | self.labs=self.labs[self.labs['hadm_id'].isin(self.data['hadm_id'])] |
| | self.labs=self.labs[self.labs['start_time']<=include_time] |
| | |
| | |
| | |
| | |
| | def readmission_length(self,include_time): |
| | self.los=include_time |
| | self.data=self.data[(self.data['los']>=include_time)] |
| | self.hids=self.data['hadm_id'].unique() |
| | if(self.feat_cond): |
| | self.cond=self.cond[self.cond['hadm_id'].isin(self.data['hadm_id'])] |
| | self.data['select_time']=self.data['los']-include_time |
| | self.data['los']=include_time |
| |
|
| | |
| | |
| | |
| | if(self.feat_med): |
| | self.meds=self.meds[self.meds['hadm_id'].isin(self.data['hadm_id'])] |
| | self.meds=pd.merge(self.meds,self.data[['hadm_id','select_time']],on='hadm_id',how='left') |
| | self.meds['stop_time']=self.meds['stop_time']-self.meds['select_time'] |
| | self.meds['start_time']=self.meds['start_time']-self.meds['select_time'] |
| | self.meds=self.meds[self.meds['stop_time']>=0] |
| | self.meds.loc[self.meds.start_time <0, 'start_time']=0 |
| | |
| | |
| | if(self.feat_proc): |
| | self.proc=self.proc[self.proc['hadm_id'].isin(self.data['hadm_id'])] |
| | self.proc=pd.merge(self.proc,self.data[['hadm_id','select_time']],on='hadm_id',how='left') |
| | self.proc['start_time']=self.proc['start_time']-self.proc['select_time'] |
| | self.proc=self.proc[self.proc['start_time']>=0] |
| | |
| | |
| | if(self.feat_lab): |
| | self.labs=self.labs[self.labs['hadm_id'].isin(self.data['hadm_id'])] |
| | self.labs=pd.merge(self.labs,self.data[['hadm_id','select_time']],on='hadm_id',how='left') |
| | self.labs['start_time']=self.labs['start_time']-self.labs['select_time'] |
| | self.labs=self.labs[self.labs['start_time']>=0] |
| |
|
| | |
| | def smooth_meds(self,bucket): |
| | final_meds=pd.DataFrame() |
| | final_proc=pd.DataFrame() |
| | final_labs=pd.DataFrame() |
| | |
| | if(self.feat_med): |
| | self.meds=self.meds.sort_values(by=['start_time']) |
| | if(self.feat_proc): |
| | self.proc=self.proc.sort_values(by=['start_time']) |
| | |
| | t=0 |
| | for i in tqdm(range(0,self.los,bucket)): |
| | |
| | if(self.feat_med): |
| | sub_meds=self.meds[(self.meds['start_time']>=i) & (self.meds['start_time']<i+bucket)].groupby(['hadm_id','drug_name']).agg({'stop_time':'max','subject_id':'max','dose_val_rx':np.nanmean}) |
| | sub_meds=sub_meds.reset_index() |
| | sub_meds['start_time']=t |
| | sub_meds['stop_time']=sub_meds['stop_time']/bucket |
| | if final_meds.empty: |
| | final_meds=sub_meds |
| | else: |
| | final_meds=final_meds.append(sub_meds) |
| | |
| | |
| | if(self.feat_proc): |
| | sub_proc=self.proc[(self.proc['start_time']>=i) & (self.proc['start_time']<i+bucket)].groupby(['hadm_id','icd_code']).agg({'subject_id':'max'}) |
| | sub_proc=sub_proc.reset_index() |
| | sub_proc['start_time']=t |
| | if final_proc.empty: |
| | final_proc=sub_proc |
| | else: |
| | final_proc=final_proc.append(sub_proc) |
| | |
| | |
| | if(self.feat_lab): |
| | sub_labs=self.labs[(self.labs['start_time']>=i) & (self.labs['start_time']<i+bucket)].groupby(['hadm_id','itemid']).agg({'subject_id':'max','valuenum':np.nanmean}) |
| | sub_labs=sub_labs.reset_index() |
| | sub_labs['start_time']=t |
| | if final_labs.empty: |
| | final_labs=sub_labs |
| | else: |
| | final_labs=final_labs.append(sub_labs) |
| | |
| | t=t+1 |
| | los=int(self.los/bucket) |
| | |
| | |
| | if(self.feat_med): |
| | f2_meds=final_meds.groupby(['hadm_id','drug_name']).size() |
| | self.med_per_adm=f2_meds.groupby('hadm_id').sum().reset_index()[0].max() |
| | self.medlength_per_adm=final_meds.groupby('hadm_id').size().max() |
| | |
| | |
| | if(self.feat_proc): |
| | f2_proc=final_proc.groupby(['hadm_id','icd_code']).size() |
| | self.proc_per_adm=f2_proc.groupby('hadm_id').sum().reset_index()[0].max() |
| | self.proclength_per_adm=final_proc.groupby('hadm_id').size().max() |
| | |
| | |
| | if(self.feat_lab): |
| | f2_labs=final_labs.groupby(['hadm_id','itemid']).size() |
| | self.labs_per_adm=f2_labs.groupby('hadm_id').sum().reset_index()[0].max() |
| | self.labslength_per_adm=final_labs.groupby('hadm_id').size().max() |
| |
|
| | |
| | print("[ PROCESSED TIME SERIES TO EQUAL TIME INTERVAL ]") |
| | self.create_Dict(final_meds,final_proc,final_labs,los) |
| | |
| | |
    def create_Dict(self,meds,proc,labs,los):
        """Build per-admission dictionaries of dense, bin-indexed feature
        matrices and pickle them (plus vocabularies and metadata) under
        ./data/dict/.

        Args:
            meds, proc, labs: bucketed feature frames from smooth_meds.
            los: number of time bins per admission.
        """
        print("[ CREATING DATA DICTIONARIES ]")
        dataDic={}
        # labels_csv is built but never persisted in this method — presumably
        # a leftover; verify against callers before removing.
        labels_csv=pd.DataFrame(columns=['hadm_id','label'])
        labels_csv['hadm_id']=pd.Series(self.hids)
        labels_csv['label']=0
        # First pass: one entry per admission with demographics and label.
        for hid in self.hids:
            grp=self.data[self.data['hadm_id']==hid]
            # NOTE(review): int(grp['Age']) / int(grp['label']) call int() on a
            # one-element Series — deprecated in newer pandas; .iloc[0] would be safer.
            dataDic[hid]={'Cond':{},'Proc':{},'Med':{},'Lab':{},'ethnicity':grp['ethnicity'].iloc[0],'age':int(grp['Age']),'gender':grp['gender'].iloc[0],'label':int(grp['label'])}
        # Second pass: dense per-bin matrices for each enabled feature group.
        for hid in tqdm(self.hids):
            grp=self.data[self.data['hadm_id']==hid]
            if(self.feat_med):
                feat=meds['drug_name'].unique()
                df2=meds[meds['hadm_id']==hid]
                if df2.shape[0]==0:
                    # No meds for this stay: all-zero (los x n_drugs) frame.
                    val=pd.DataFrame(np.zeros([los,len(feat)]),columns=feat)
                    val=val.fillna(0)
                    val.columns=pd.MultiIndex.from_product([["MEDS"], val.columns])
                else:
                    # val: dose per (bin, drug); df2: stop_time per (bin, drug).
                    val=df2.pivot_table(index='start_time',columns='drug_name',values='dose_val_rx')
                    df2=df2.pivot_table(index='start_time',columns='drug_name',values='stop_time')
                    # Reindex to the full 0..los-1 bin range, forward-filling
                    # so an order remains active until its stop bin.
                    add_indices = pd.Index(range(los)).difference(df2.index)
                    add_df = pd.DataFrame(index=add_indices, columns=df2.columns).fillna(np.nan)
                    df2=pd.concat([df2, add_df])
                    df2=df2.sort_index()
                    df2=df2.ffill()
                    df2=df2.fillna(0)
                    val=pd.concat([val, add_df])
                    val=val.sort_index()
                    val=val.ffill()
                    val=val.fillna(-1)
                    # Signal = 1 while stop_time is still ahead of the bin index.
                    # NOTE(review): positional axis in .sub(df2.index,0) is
                    # deprecated — axis=0 keyword is the modern spelling.
                    df2.iloc[:,0:]=df2.iloc[:,0:].sub(df2.index,0)
                    df2[df2>0]=1
                    df2[df2<0]=0
                    # Zero out dose values where the order is not active.
                    val.iloc[:,0:]=df2.iloc[:,0:]*val.iloc[:,0:]
                # NOTE(review): in the empty case above, df2 is still the raw
                # empty slice (not the zero matrix) — 'signal' then has the raw
                # meds columns; looks unintended, confirm before relying on it.
                dataDic[hid]['Med']['signal']=df2.iloc[:,0:].to_dict(orient="list")
                dataDic[hid]['Med']['val']=val.iloc[:,0:].to_dict(orient="list")

            if(self.feat_proc):
                feat=proc['icd_code'].unique()
                df2=proc[proc['hadm_id']==hid]
                if df2.shape[0]==0:
                    # No procedures: all-zero (los x n_codes) frame.
                    df2=pd.DataFrame(np.zeros([los,len(feat)]),columns=feat)
                    df2=df2.fillna(0)
                    df2.columns=pd.MultiIndex.from_product([["PROC"], df2.columns])
                else:
                    # Presence matrix: 1 in the bin where the procedure occurs.
                    df2['val']=1
                    df2=df2.pivot_table(index='start_time',columns='icd_code',values='val')
                    add_indices = pd.Index(range(los)).difference(df2.index)
                    add_df = pd.DataFrame(index=add_indices, columns=df2.columns).fillna(np.nan)
                    df2=pd.concat([df2, add_df])
                    df2=df2.sort_index()
                    df2=df2.fillna(0)
                    df2[df2>0]=1
                dataDic[hid]['Proc']=df2.to_dict(orient="list")

            if(self.feat_lab):
                feat=labs['itemid'].unique()
                df2=labs[labs['hadm_id']==hid]
                if df2.shape[0]==0:
                    # No labs: all-zero (los x n_items) frame.
                    val=pd.DataFrame(np.zeros([los,len(feat)]),columns=feat)
                    val=val.fillna(0)
                    val.columns=pd.MultiIndex.from_product([["LAB"], val.columns])
                else:
                    # val: measured values; df2: measurement-present indicator.
                    val=df2.pivot_table(index='start_time',columns='itemid',values='valuenum')
                    df2['val']=1
                    df2=df2.pivot_table(index='start_time',columns='itemid',values='val')
                    add_indices = pd.Index(range(los)).difference(df2.index)
                    add_df = pd.DataFrame(index=add_indices, columns=df2.columns).fillna(np.nan)
                    df2=pd.concat([df2, add_df])
                    df2=df2.sort_index()
                    df2=df2.fillna(0)
                    val=pd.concat([val, add_df])
                    val=val.sort_index()
                    # Impute missing lab values per self.impute; anything else
                    # falls through to plain zero-fill.
                    if self.impute=='Mean':
                        val=val.ffill()
                        val=val.bfill()
                        val=val.fillna(val.mean())
                    elif self.impute=='Median':
                        val=val.ffill()
                        val=val.bfill()
                        val=val.fillna(val.median())
                    val=val.fillna(0)
                    df2[df2>0]=1
                    df2[df2<0]=0
                # NOTE(review): as with meds, the empty case leaves df2 as the
                # raw empty slice here — confirm 'signal' is meant to differ.
                dataDic[hid]['Lab']['signal']=df2.iloc[:,0:].to_dict(orient="list")
                dataDic[hid]['Lab']['val']=val.iloc[:,0:].to_dict(orient="list")

            if(self.feat_cond):
                feat=self.cond['new_icd_code'].unique()
                grp=self.cond[self.cond['hadm_id']==hid]
                if(grp.shape[0]==0):
                    # No diagnoses recorded: emit a single padding token.
                    dataDic[hid]['Cond']={'fids':list(['<PAD>'])}
                else:
                    dataDic[hid]['Cond']={'fids':list(grp['new_icd_code'])}

        # Metadata: per-admission maximum counts and the bin count.
        metaDic={'Cond':{},'Proc':{},'Med':{},'Lab':{},'LOS':{}}
        metaDic['LOS']=los
        with open("./data/dict/dataDic", 'wb') as fp:
            pickle.dump(dataDic, fp)

        with open("./data/dict/hadmDic", 'wb') as fp:
            pickle.dump(self.hids, fp)

        with open("./data/dict/ethVocab", 'wb') as fp:
            pickle.dump(list(self.data['ethnicity'].unique()), fp)
        self.eth_vocab = self.data['ethnicity'].nunique()

        with open("./data/dict/ageVocab", 'wb') as fp:
            pickle.dump(list(self.data['Age'].unique()), fp)
        self.age_vocab = self.data['Age'].nunique()

        with open("./data/dict/insVocab", 'wb') as fp:
            pickle.dump(list(self.data['insurance'].unique()), fp)
        self.ins_vocab = self.data['insurance'].nunique()

        if(self.feat_med):
            with open("./data/dict/medVocab", 'wb') as fp:
                pickle.dump(list(meds['drug_name'].unique()), fp)
            self.med_vocab = meds['drug_name'].nunique()
            metaDic['Med']=self.med_per_adm

        if(self.feat_cond):
            with open("./data/dict/condVocab", 'wb') as fp:
                pickle.dump(list(self.cond['new_icd_code'].unique()), fp)
            self.cond_vocab = self.cond['new_icd_code'].nunique()
            metaDic['Cond']=self.cond_per_adm

        if(self.feat_proc):
            with open("./data/dict/procVocab", 'wb') as fp:
                pickle.dump(list(proc['icd_code'].unique()), fp)
            # NOTE(review): unlike med/cond, this stores the array of codes,
            # not a count (.unique() vs .nunique()) — confirm intended.
            self.proc_vocab = proc['icd_code'].unique()
            metaDic['Proc']=self.proc_per_adm

        if(self.feat_lab):
            with open("./data/dict/labsVocab", 'wb') as fp:
                pickle.dump(list(labs['itemid'].unique()), fp)
            # NOTE(review): same asymmetry as proc_vocab above.
            self.lab_vocab = labs['itemid'].unique()
            metaDic['Lab']=self.labs_per_adm

        with open("./data/dict/metaDic", 'wb') as fp:
            pickle.dump(metaDic, fp)
| |
|
| |
|
| |
|
| |
|
| |
|