Spaces:
Running
Running
File size: 4,149 Bytes
5654c4a e1c703d 5654c4a e1c703d 5654c4a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 |
# take min similarity and max similarity from the calibration data, compute linspace of probability
import numpy as np
import pandas as pd
import argparse
from protein_conformal.util import *
import tqdm
def main(args):
    """Precompute a lookup table mapping similarity-score bins to
    simplified Venn-Abers probabilities and save it to ``args.output`` as CSV.

    Workflow:
      1. Load the calibration ``.npy`` file, shuffle, and keep ``n_calib`` rows.
      2. Build ``n_bins`` evenly spaced similarity values spanning the
         calibration similarity range.
      3. For each bin value, compute the (p0, p1) Venn-Abers probability pair
         for exact hits; optionally repeat for partial hits (``--partial``).

    Args:
        args: parsed CLI namespace with ``cal_data``, ``n_calib``, ``n_bins``,
            ``partial``, and ``output`` attributes.

    Side effects:
        Writes one CSV to ``args.output``; shuffles the loaded array in place.
    """
    # Get a probability for each hit based on the distance using
    # Venn-Abers / isotonic regression.
    print("loading calibration data")
    data = np.load(
        args.cal_data,
        allow_pickle=True,
    )
    n_calib = args.n_calib
    np.random.shuffle(data)
    cal_data = data[:n_calib]

    # Exact-hit similarities/labels, flattened to 1-D for the VA predictor.
    X_cal, y_cal = get_sims_labels(cal_data, partial=False)
    X_cal = X_cal.flatten()
    y_cal = y_cal.flatten()

    print("Get probability distribution for a grid of similarity bins")
    min_sim, max_sim = X_cal.min(), X_cal.max()
    # Create bins for similarity scores to get SVA probabilities.
    bins = np.linspace(min_sim, max_sim, args.n_bins)

    # Collect rows in a list and build the DataFrame once: repeated
    # pd.concat inside the loop is quadratic in the number of bins.
    rows = []
    for d in tqdm.tqdm(bins, desc="Calculating probabilities"):
        p_0, p_1 = simplifed_venn_abers_prediction(X_cal, y_cal, d)
        rows.append(
            {
                "similarity": d,
                "prob_exact_p0": p_0,
                "prob_exact_p1": p_1,
                "prob_partial_p0": np.nan,  # filled below when --partial
                "prob_partial_p1": np.nan,  # filled below when --partial
            }
        )
    sim2prob = pd.DataFrame(
        rows,
        columns=[
            "similarity",
            "prob_exact_p0",
            "prob_exact_p1",
            "prob_partial_p0",
            "prob_partial_p1",
        ],
    )

    if args.partial:
        # Reload from disk: the shuffle above mutated `data` in place, and
        # the authors observed shuffling could corrupt the original ordering.
        # TODO: this stage may not be necessary.
        print("loading calibration data")
        data = np.load(
            args.cal_data,
            allow_pickle=True,
        )
        np.random.shuffle(data)
        cal_data = data[:n_calib]
        X_cal, y_cal = get_sims_labels(cal_data, partial=True)
        X_cal = X_cal.flatten()
        y_cal = y_cal.flatten()
        print("getting partial probabilities")
        for i, d in tqdm.tqdm(
            enumerate(bins), desc="Calculating partial probabilities", total=len(bins)
        ):
            p_0, p_1 = simplifed_venn_abers_prediction(X_cal, y_cal, d)
            # Fill the partial columns of the row for similarity bin i.
            sim2prob.loc[i, "prob_partial_p0"] = p_0
            sim2prob.loc[i, "prob_partial_p1"] = p_1

    # Single save: the original wrote the same CSV twice when --partial was set.
    print("saving df probabilities")
    sim2prob.to_csv(
        args.output,
        index=False,
    )
def parse_args(argv=None):
    """Parse command-line options for the similarity->probability precompute.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (the standard argparse behavior), so existing callers are unaffected.

    Returns:
        argparse.Namespace with cal_data, output, partial, n_bins, n_calib.
    """
    parser = argparse.ArgumentParser(
        "Get probabilities for similarity score distribution of a given dataset, so its precomputed"
    )
    parser.add_argument(
        "--cal_data",
        type=str,
        default="data/pfam_new_proteins.npy",
        help="Calibration dataset (use pfam_new_proteins.npy, NOT the backup with leakage)",
    )
    parser.add_argument(
        "--output",
        type=str,
        default="data/sim2prob_lookup.csv",
        help="Output file for the dataframe mapping similarities to probabilities",
    )
    # BUG FIX: the original used type=bool, but bool("False") is True, so any
    # supplied value (including "False") enabled partial mode. store_true makes
    # --partial a proper flag that defaults to False when absent.
    parser.add_argument(
        "--partial",
        action="store_true",
        help="Generate probability of partial hits given similarity score bins",
    )
    parser.add_argument(
        "--n_bins",
        type=int,
        default=100,
        help="Number of bins to use for the similarity scores",
    )
    parser.add_argument(
        "--n_calib",
        type=int,
        default=100,
        help="Number of calibration data points",
    )
    return parser.parse_args(argv)
# Script entry point: parse CLI options, then run the precompute pipeline.
if __name__ == "__main__":
    main(parse_args())
|