"""Split the ESSENCEDock dataset into train/test parquet files.

Reads filter parameters from ``parameters.yaml``, downloads the dataset CSV
from the Hugging Face hub, performs a stratified 80/20 train/test split,
restricts both splits to the configured targets, and writes each split to
``intermediate_data/`` as parquet for downstream pipeline stages.
"""
import datasets  # NOTE(review): unused in this chunk — confirm before removing
import yaml
import pyarrow as pa
import pyarrow.parquet as pq
from sklearn.model_selection import train_test_split
import pandas as pd

# Load run configuration; expected to contain a 'targets' list of target names.
with open("parameters.yaml") as parameters_file:
    parameters = yaml.safe_load(parameters_file)

df = pd.read_csv(
    "hf://datasets/aanyam/ESSENCEDock_595Project/ESSENCEDock_dataset_final.csv"
)

# Stratify on 'Target Name' so each target's proportion is preserved in both
# splits; fixed random_state keeps the split reproducible across runs.
train_df, test_df = train_test_split(
    df, test_size=0.2, stratify=df['Target Name'], random_state=42
)

# Filter each split to the configured targets and persist it. The filtering
# happens AFTER the split so the stratification sees the full dataset.
for split_df, out_path in (
    (train_df, "intermediate_data/data_train.parquet"),
    (test_df, "intermediate_data/data_test.parquet"),
):
    selected = split_df[split_df["Target Name"].isin(parameters['targets'])]
    pq.write_table(pa.Table.from_pandas(selected), out_path)