# Featurize SMILES datasets (d2 train/val/test splits) with ECFP fingerprints
# and persist the feature matrices + labels as Parquet files.
import time
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm
from molfeat.calc import FPCalculator
from molfeat.trans import MoleculeTransformer
# Initialize transformer
# ECFP (extended-connectivity) fingerprint calculator from molfeat.
calc = FPCalculator("ecfp")
# Module-level transformer shared by transform_and_save below; n_jobs=10
# parallelizes featurization across 10 worker processes/threads (molfeat-managed).
mol_transf = MoleculeTransformer(calc, n_jobs=10)
def transform_and_save(df, output_path, split_name="", batch_size=100000, transformer=None):
    """Featurize SMILES in batches and write features plus labels to Parquet.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'clean_smiles' column (input SMILES strings) and a
        'value' column (the label carried through to the output file).
    output_path : str
        Destination Parquet file path.
    split_name : str, optional
        Human-readable split label, used only in progress/log messages.
    batch_size : int, optional
        Number of SMILES featurized per batch (bounds peak memory usage).
    transformer : callable, optional
        Maps a sequence of SMILES to a sequence of per-molecule feature
        vectors. Defaults to the module-level ``mol_transf`` (ECFP).

    Raises
    ------
    ValueError
        If ``df`` is empty — there would be nothing to featurize or write.
    """
    if transformer is None:
        # Resolved lazily so the default tracks the module-level transformer.
        transformer = mol_transf
    start = time.time()
    print(f"\nStarting transformation for {split_name}...")
    smiles = df['clean_smiles'].values
    values = df['value'].values
    if len(smiles) == 0:
        # np.vstack([]) would raise an opaque error; fail with a clear message.
        raise ValueError(f"Empty dataframe for split '{split_name}'; nothing to featurize.")
    all_features = []
    for batch_start in tqdm(range(0, len(smiles), batch_size), desc=f"{split_name} batches"):
        batch_smiles = smiles[batch_start:batch_start + batch_size]
        # Stack per-molecule fingerprints into a 2-D (batch, n_bits) array.
        all_features.append(np.stack(transformer(batch_smiles)))
    features = np.vstack(all_features)
    df_fps = pd.DataFrame(features, columns=[f"feature_{i}" for i in range(features.shape[1])])
    df_fps["value"] = values  # Append the label column alongside the features
    pq.write_table(pa.Table.from_pandas(df_fps), output_path)
    end = time.time()
    print(f"Finished {split_name} in {end - start:.2f} seconds.")
# Process each split: read the raw Parquet, featurize, and write the features.
split_jobs = [
    ("train", "product/d2_split/train.parquet", "intermediate_data/d2/data_train_features.parquet"),
    ("validation", "product/d2_split/val.parquet", "intermediate_data/d2/data_val_features.parquet"),
    ("test", "product/d2_split/test.parquet", "intermediate_data/d2/data_test_features.parquet"),
]
for split_label, source_path, target_path in split_jobs:
    split_df = pq.read_table(source_path).to_pandas()
    transform_and_save(split_df, target_path, split_label)