code
stringlengths
17
6.64M
def test_ou_process():
    """Sanity-check that SparseMeanRevertingOracle produces prices that
    mean-revert around r_bar over a full trading day, for several seeds."""
    date_ns = int(pd.to_datetime('20210205').to_datetime64())
    mkt_open = date_ns + str_to_ns('09:30:00')
    mkt_close = date_ns + str_to_ns('16:00:00')
    r_bar = 100000
    stock_name = 'AAPL'
    symbols = {
        stock_name: {
            'r_bar': r_bar,
            'kappa': 1.67e-16,
            'fund_vol': 5e-05,
            'megashock_lambda_a': 2.77778e-18,
            'megashock_mean': 1000,
            'megashock_var': 50000,
        }
    }
    plot_logs = False
    if plot_logs:
        fig, ax = plt.subplots(figsize=(12, 8))
    one_minute_ns = int(60 * 1000000000.0)
    for seed in range(10):
        rng = np.random.RandomState(seed)
        symbols[stock_name]['random_state'] = rng
        oracle = SparseMeanRevertingOracle(mkt_open, mkt_close, symbols)
        samples = {'minute': [], 'price': []}
        # Observe the fundamental once per minute over the session.
        for minute in range(mkt_open, mkt_close, one_minute_ns):
            observed = oracle.observe_price(stock_name, minute, sigma_n=0, random_state=rng)
            samples['price'].append(observed)
            samples['minute'].append(minute)
        samples = pd.DataFrame(samples)
        assert 0.9 < samples['price'].mean() / r_bar < 1.1, 'The generated fundamental value is not mean-reverting'
        if plot_logs:
            samples['price'] /= samples.iloc[0]['price']
            samples['price'].plot(ax=ax)
    if plot_logs:
        os.makedirs('logs', exist_ok=True)
        plt.savefig(os.path.join('logs', 'test_ou_process.png'))
    assert True
def test_rmsc04():
    """Smoke test: build the RMSC04 configuration and run it end to end."""
    config = build_config_rmsc04(
        seed=1,
        book_logging=False,
        end_time='10:00:00',
        log_orders=False,
        exchange_log_orders=False,
    )
    kernel_seed = np.random.randint(low=0, high=2 ** 32, dtype='uint64')
    kernel_kwargs = subdict(config, ['start_time', 'stop_time', 'agents', 'agent_latency_model',
                                     'default_computation_delay', 'custom_properties'])
    kernel = Kernel(
        log_dir='__test_logs',
        random_state=np.random.RandomState(seed=kernel_seed),
        skip_log=True,
        **kernel_kwargs,
    )
    kernel.run()
    # Clean up the log directory the kernel created.
    shutil.rmtree('log/__test_logs')
    assert True
class policyPassive():
    """Baseline policy that always takes the passive action (1)."""

    def __init__(self):
        self.name = 'passive'

    def get_action(self, state):
        # The state is ignored: this policy is constant.
        return 1
class policyAggressive():
    """Baseline policy that always takes the aggressive action (0)."""

    def __init__(self):
        self.name = 'aggressive'

    def get_action(self, state):
        # State-independent constant action.
        return 0
class policyRandom():
    """Baseline policy: choose uniformly between actions 0 and 1."""

    def __init__(self):
        self.name = 'random'

    def get_action(self, state):
        # State-independent coin flip.
        return np.random.choice([0, 1])
class policyRandomWithNoAction():
    """Baseline policy: choose uniformly among 0, 1 and the no-op action 2."""

    def __init__(self):
        self.name = 'random_no_action'

    def get_action(self, state):
        # State-independent uniform draw over the three actions.
        return np.random.choice([0, 1, 2])
class policyRL():
    """Policy learned during training.

    Loads the best checkpoint from the ``dqn_execution_demo_4`` experiment in
    ``~/ray_results`` and delegates action selection to the restored trainer.
    """

    def __init__(self):
        self.name = 'rl'
        name_xp = 'dqn_execution_demo_4'
        analysis = Analysis(f'~/ray_results/{name_xp}')
        # Touch the trial dataframes so the analysis object is fully loaded.
        trials = list(analysis.trial_dataframes.keys())
        best_trial_path = analysis.get_best_logdir(metric='episode_reward_mean', mode='max')
        best_checkpoint = analysis.get_best_checkpoint(trial=best_trial_path, mode='max')
        # Rebuild the training configuration used for the experiment.
        config = dqn.DEFAULT_CONFIG.copy()
        config['framework'] = 'torch'
        config['observation_filter'] = 'MeanStdFilter'
        config['hiddens'] = [50, 20]
        config['env_config'] = {
            'background_config': 'rmsc04',
            'timestep_duration': '10S',
            'execution_window': '04:00:00',
            'parent_order_size': 20000,
            'order_fixed_size': 50,
            'not_enough_reward_update': -100,
        }
        self.trainer = dqn.DQNTrainer(config=config, env='markets-execution-v0')
        self.trainer.restore(best_checkpoint)

    def get_action(self, state):
        # Greedy action from the restored DQN.
        return self.trainer.compute_action(state)
def generate_env(seed):
    """Create the markets-execution environment with the fixed demo
    parameters and seed it for reproducibility."""
    env = gym.make(
        'markets-execution-v0',
        background_config='rmsc04',
        timestep_duration='10S',
        execution_window='04:00:00',
        parent_order_size=20000,
        order_fixed_size=50,
        not_enough_reward_update=-100,
    )
    env.seed(seed)
    return env
def flatten_dict(d: MutableMapping, sep: str = '.') -> MutableMapping:
    """Flatten a nested mapping into a single level, joining keys with ``sep``.

    Raises if normalization yields anything other than exactly one record.
    """
    records = pd.json_normalize(d, sep=sep).to_dict(orient='records')
    (flattened,) = records  # exactly one flat record is expected
    return flattened
def run_episode(seed=None, policy=None):
    """Play one full episode with ``policy`` on a freshly seeded env.

    Returns the flattened final ``info`` dict augmented with the cumulative
    reward and the policy name.
    """
    env = generate_env(seed)
    state = env.reset()
    episode_reward = 0
    done = False
    while not done:
        state, reward, done, info = env.step(policy.get_action(state))
        episode_reward += reward
    output = flatten_dict(info)
    output['episode_reward'] = episode_reward
    output['name'] = policy.name
    return output
def run_N_episode(N):
    """Run N seeds for each baseline policy in parallel.

    heads-up: does not work yet for rllib policies - pickle error
    (https://stackoverflow.com/questions/28821910); run rllib policies
    serially instead.
    """
    policies = [policyAggressive(), policyRandom(), policyPassive(), policyRandomWithNoAction()]
    tests = [{'policy': policy, 'seed': seed} for policy in policies for seed in range(N)]

    def wrap_run_episode(param):
        # p_map passes a single positional argument; expand it into kwargs.
        return run_episode(**param)

    return p_map(wrap_run_episode, tests)
def run(config, log_dir='', kernel_seed=None):
    """Build a Kernel from ``config`` and run the simulation to completion.

    Args:
        config: configuration dict containing the kernel keys
            (start_time, stop_time, agents, ...).
        log_dir: directory name for the kernel logs (previously accepted but
            silently ignored — the Kernel always received '').
        kernel_seed: RNG seed for the kernel. When None, a fresh random seed
            is drawn at call time. (The old signature drew the default once
            at import time, so every call shared the same seed.)

    Returns:
        The kernel end state returned by ``kernel.run()``.
    """
    if kernel_seed is None:
        # Draw at call time; a default argument is evaluated only once.
        kernel_seed = np.random.randint(low=0, high=(2 ** 32), dtype='uint64')
    print()
    print('╔═══════════════════════════════════════════════════════════╗')
    print('β•‘ ABIDES: Agent-Based Interactive Discrete Event Simulation β•‘')
    print('β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•')
    print()
    kernel = Kernel(
        random_state=np.random.RandomState(seed=kernel_seed),
        log_dir=log_dir,  # fixed: was hard-coded to ''
        **subdict(config, ['start_time', 'stop_time', 'agents', 'agent_latency_model',
                           'default_computation_delay', 'custom_properties']),
    )
    sim_start_time = dt.datetime.now()
    logger.info(f'Simulation Start Time: {sim_start_time}')
    end_state = kernel.run()
    sim_end_time = dt.datetime.now()
    logger.info(f'Simulation End Time: {sim_end_time}')
    logger.info(f'Time taken to run simulation: {(sim_end_time - sim_start_time)}')
    return end_state
def version_greaterorequal(l1, l2):
    """Return True iff version list ``l1`` >= ``l2``.

    Versions are compared component-wise; a missing component counts as 0,
    so [2] vs [2, 17] compares as 2.0 < 2.17 → False.

    Fixes two defects in the recursive original:
    - [2] >= [2, 17] wrongly returned True (len(l1) == 1 short-circuit);
    - [2, 17, 1] vs [2, 17] raised IndexError (recursed into an empty l2).
    """
    width = max(len(l1), len(l2))
    padded1 = tuple(l1) + (0,) * (width - len(l1))
    padded2 = tuple(l2) + (0,) * (width - len(l2))
    return padded1 >= padded2
def get_git_version():
    """Return the installed git version as a list of ints, e.g. [2, 39, 2]."""
    raw = subprocess.run(['git', '--version'], stdout=subprocess.PIPE).stdout.decode('utf-8')
    # Output looks like 'git version 2.39.2\n'; keep only the numbers.
    digits = raw.replace('git version ', '').replace('\n', '')
    return [int(part) for part in digits.split('.')]
def run_command(command, commit_sha, specific_path_underscore='0', git_path=None, pass_logdir_sha=None, old_new_flag=None):
    """Run ``command`` and return its wall-clock duration.

    ``pass_logdir_sha`` is either None or a tuple of (arg name, function of
    the commit sha producing the arg value); when set, both are appended to
    the command. For commit_sha == 'CURRENT' the command runs in the current
    tree; otherwise a temporary detached git worktree at ``commit_sha`` is
    created next to ``git_path``, the command runs inside it, and the
    worktree is removed afterwards.
    """

    def _timed(cmd):
        # Execute cmd through the shell and capture start/end timestamps.
        start = dt.datetime.now()
        os.system(cmd)
        return start, dt.datetime.now()

    if pass_logdir_sha:
        # NOTE(review): pass_logdir_sha is a tuple here, not a path — verify
        # this rmtree call's intent (kept as-is to preserve behavior).
        shutil.rmtree(pass_logdir_sha, ignore_errors=True)
        full_command = f'{command} {pass_logdir_sha[0]} {pass_logdir_sha[1](commit_sha)}'
    else:
        full_command = command

    if commit_sha == 'CURRENT':
        simulation_start_time, simulation_end_time = _timed(full_command)
    else:
        assert version_greaterorequal(get_git_version(), [2, 17]), 'git version needs to be >= 2.17'
        orig_pwd = os.getcwd()
        parent_dir = '/'.join(git_path.split('/')[:-1])
        path_tmp_worktree = f'{parent_dir}/tmp_{old_new_flag}_{commit_sha}_{specific_path_underscore}'
        subprocess.run(['git', 'worktree', 'add', '--detach', path_tmp_worktree, commit_sha],
                       stdout=subprocess.DEVNULL)
        os.chdir(path_tmp_worktree)
        simulation_start_time, simulation_end_time = _timed(full_command)
        subprocess.run(['git', 'worktree', 'remove', path_tmp_worktree], stdout=subprocess.DEVNULL)
        os.chdir(orig_pwd)
    return simulation_end_time - simulation_start_time
def get_path(level):
    """Return this file's directory, walking ``level`` components up."""
    here = str(pathlib.Path(__file__).parent.absolute())
    if level == 0:
        return here
    # Drop the last `level` path components.
    return '/'.join(here.split('/')[:-level])
def get_paths(parameters):
    """Derive the per-test log sub-path and its underscore-joined twin.

    Both are built from the new config name, the shared end-time
    (':' replaced with '-') and the shared seed.
    """
    config = parameters['new']['config']
    end_time = parameters['shared']['end-time'].replace(':', '-')
    seed = parameters['shared']['seed']
    return (f'{config}/{end_time}/{seed}', f'{config}_{end_time}_{seed}')
def run_test(test_):
    """Execute one (parameters, old/new flag) test case via runasof and
    return a metadata dict describing the run."""
    parameters, old_new_flag = test_
    specific_path, specific_path_underscore = get_paths(parameters)
    stamp = dt.datetime.now().strftime('%Y%m%d%H%M%S')
    time = runasof.run_command(
        parameters['command'][old_new_flag],
        commit_sha=parameters[old_new_flag]['sha'],
        specific_path_underscore=specific_path_underscore,
        git_path=root_path_abides,
        old_new_flag=old_new_flag,
        pass_logdir_sha=('--log_dir',
                         (lambda x: ((((root_path_ec2 + f'/tmp/{old_new_flag}_{stamp}/') + x) + '/') + specific_path))),
    )
    output = {
        'sha': parameters[old_new_flag]['sha'],
        'config': parameters[old_new_flag]['config'],
        'end-time': parameters['shared']['end-time'],
        'seed': parameters['shared']['seed'],
        'time': time,
    }
    if parameters['with_log']:
        output['path_to_ob'] = (root_path_ec2
                                + f"/tmp/{old_new_flag}_{stamp}/{parameters[old_new_flag]['sha']}/{specific_path}/ORDERBOOK_ABM_FULL.bz2")
    else:
        output['path_to_ob'] = 'no_log'
    output['flag'] = old_new_flag
    return output
def compute_ob(path_old, path_new):
    """Compare two pickled order books: 0 when identical, 1 otherwise."""
    ob_old = pd.read_pickle(path_old)
    ob_new = pd.read_pickle(path_new)
    return 0 if ob_old.equals(ob_new) else 1
def run_tests(LIST_PARAMETERS, varying_parameters):
    """Run every parameter set under both 'old' and 'new' in parallel, compare
    the resulting order books (when logging is on) and print timing stats.

    Args:
        LIST_PARAMETERS: list of parameter dicts (see generate_parameter_dict).
        varying_parameters: column names (besides seed) used as the index
            when aggregating timings.
    """
    old_new_flags = ['old', 'new']
    tests = list(itertools.product(LIST_PARAMETERS, old_new_flags))
    outputs = p_map(run_test, tests)
    df = pd.DataFrame(outputs)
    df_old = df[(df['flag'] == 'old')]
    df_new = df[(df['flag'] == 'new')]
    print(f'THERE ARE {len(df_new)} TESTS RESULTS.')
    if LIST_PARAMETERS[0]['with_log']:
        # Pairwise order-book comparison; compute_ob returns 0 when identical.
        path_olds = list(df_old['path_to_ob'])
        path_news = list(df_new['path_to_ob'])
        ob_comps = p_map(compute_ob, path_olds, path_news)
        if (sum(ob_comps) == 0):
            print('ALL TESTS ARE SUCCESS!')
        else:
            # Fixed message: was 'ALERT: {n}TEST FAILURE' (missing space).
            print(f'ALERT: {sum(ob_comps)} TEST FAILURE')
    df_old = df_old[(varying_parameters + ['seed', 'time'])].set_index((varying_parameters + ['seed']))
    df_new = df_new[(varying_parameters + ['seed', 'time'])].set_index((varying_parameters + ['seed']))
    df_diff = (df_old - df_new)
    df_results = df_diff.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']]
    df_diff_pct = ((100 * (df_old - df_new)) / df_old)
    df_results_pct = df_diff_pct.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']]
    print('*********************************************')
    print('*********************************************')
    print('OLD RUNNING TIME')
    print(df_old.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']])
    print('*********************************************')
    print('*********************************************')
    print('NEW RUNNING TIME')
    with pd.option_context('display.float_format', '{:0.2f}'.format):
        print(df_new.groupby(['config', 'end-time'])['time'].describe()[['mean', 'std']])
    print('*********************************************')
    print('*********************************************')
    print('TIME DIFFERENCE in seconds')
    with pd.option_context('display.float_format', '{:0.2f}'.format):
        # Timedelta stats are converted to seconds for readable display.
        df_results['mean'] = df_results['mean'].dt.total_seconds()
        df_results['std'] = df_results['std'].dt.total_seconds()
        print(df_results)
    print('*********************************************')
    print('*********************************************')
    print('TIME DIFFERENCE in %')
    with pd.option_context('display.float_format', '{:0.2f}'.format):
        print(df_results_pct)
def get_path(level):
    """Return the directory containing this file, trimmed by ``level``
    trailing path components (level == 0 returns it unchanged)."""
    directory = str(pathlib.Path(__file__).parent.absolute())
    if level != 0:
        parts = directory.split('/')
        directory = '/'.join(parts[:-level])
    return directory
def generate_parameter_dict(seed, config, end_time, with_log):
    """Build the parameter dict driving one old-vs-new comparison run.

    When ``with_log`` is set, order/book logging flags are enabled with a
    book frequency of 0; otherwise all three stay None.
    """
    if with_log:
        log_orders, exchange_log_orders, book_freq = True, True, 0
    else:
        log_orders = exchange_log_orders = book_freq = None
    sha = 'f1968a56fdb55fd7c70be1db052be07cb701a5fb'
    parameters = {
        'old': {'sha': sha, 'script': 'abides_cmd.py', 'config': config},
        'new': {'sha': sha, 'script': 'abides_cmd.py', 'config': config},
        'config_new': config,
        'end-time': end_time,
        'with_log': with_log,
        'shared': {
            'end-time': end_time,
            'end_time': end_time,
            'seed': seed,
            'verbose': 0,
            'log_orders': log_orders,
            'exchange_log_orders': exchange_log_orders,
            'book_freq': book_freq,
        },
    }
    parameters['command'] = generate_command(parameters)
    return parameters
def generate_command(parameters):
    """Assemble the old and new shell command lines from the parameter dict."""
    shared_flags = ' '.join(f'--{key} {val}' for key, val in parameters['shared'].items())
    commands = {}
    for flag in ('old', 'new'):
        spec = f"{parameters[flag]['script']} -config {parameters[flag]['config']}"
        commands[flag] = 'python3 -W ignore -u ' + spec + ' ' + shared_flags
    return commands
def get_path(level):
    """Directory of this file; ``level`` > 0 strips that many trailing
    components before returning."""
    location = pathlib.Path(__file__).parent.absolute()
    as_string = str(location)
    return as_string if level == 0 else '/'.join(as_string.split('/')[:-level])
def generate_parameter_dict(seed):
    """Fixed old/new comparison parameters (rmsc03 vs rmsc03_function) for a
    given seed."""
    return {
        'sha_old': '8ab374e8d7c9f6fa6ab522502259e94e550e81b5',
        'sha_new': 'ccdb7b3b0b099b89b86a6500e4f8f731a5dc6410',
        'script_old': 'abides.py',
        'script_new': 'abides_cmd.py',
        'config_old': 'rmsc03',
        'config_new': 'rmsc03_function',
        'end-time': '10',
        'seed': seed,
    }
def list_pmhc_types():
    """Return the fixed, ordered catalogue of pMHC binder identifiers
    (50 entries, ending with the negative-control 'NC' binders)."""
    return [
        'A0101_VTEHDTLLY_IE-1_CMV_binder',
        'A0201_KTWGQYWQV_gp100_Cancer_binder',
        'A0201_ELAGIGILTV_MART-1_Cancer_binder',
        'A0201_CLLWSFQTSA_Tyrosinase_Cancer_binder',
        'A0201_IMDQVPFSV_gp100_Cancer_binder',
        'A0201_SLLMWITQV_NY-ESO-1_Cancer_binder',
        'A0201_KVAELVHFL_MAGE-A3_Cancer_binder',
        'A0201_KVLEYVIKV_MAGE-A1_Cancer_binder',
        'A0201_CLLGTYTQDV_Kanamycin-B-dioxygenase_binder',
        'A0201_LLDFVRFMGV_EBNA-3B_EBV_binder',
        'A0201_LLMGTLGIVC_HPV-16E7_82-91_binder',
        'A0201_CLGGLLTMV_LMP-2A_EBV_binder',
        'A0201_YLLEMLWRL_LMP1_EBV_binder',
        'A0201_FLYALALLL_LMP2A_EBV_binder',
        'A0201_GILGFVFTL_Flu-MP_Influenza_binder',
        'A0201_GLCTLVAML_BMLF1_EBV_binder',
        'A0201_NLVPMVATV_pp65_CMV_binder',
        'A0201_ILKEPVHGV_RT_HIV_binder',
        'A0201_FLASKIGRLV_Ca2-indepen-Plip-A2_binder',
        'A2402_CYTWNQMNL_WT1-(235-243)236M_Y_binder',
        'A0201_RTLNAWVKV_Gag-protein_HIV_binder',
        'A0201_KLQCVDLHV_PSA146-154_binder',
        'A0201_LLFGYPVYV_HTLV-1_binder',
        'A0201_SLFNTVATL_Gag-protein_HIV_binder',
        'A0201_SLYNTVATLY_Gag-protein_HIV_binder',
        'A0201_SLFNTVATLY_Gag-protein_HIV_binder',
        'A0201_RMFPNAPYL_WT-1_binder',
        'A0201_YLNDHLEPWI_BCL-X_Cancer_binder',
        'A0201_MLDLQPETT_16E7_HPV_binder',
        'A0301_KLGGALQAK_IE-1_CMV_binder',
        'A0301_RLRAEAQVK_EMNA-3A_EBV_binder',
        'A0301_RIAAWMATY_BCL-2L1_Cancer_binder',
        'A1101_IVTDFSVIK_EBNA-3B_EBV_binder',
        'A1101_AVFDRKSDAK_EBNA-3B_EBV_binder',
        'B3501_IPSINVHHY_pp65_CMV_binder',
        'A2402_AYAQKIFKI_IE-1_CMV_binder',
        'A2402_QYDPVAALF_pp65_CMV_binder',
        'B0702_QPRAPIRPI_EBNA-6_EBV_binder',
        'B0702_TPRVTGGGAM_pp65_CMV_binder',
        'B0702_RPPIFIRRL_EBNA-3A_EBV_binder',
        'B0702_RPHERNGFTVL_pp65_CMV_binder',
        'B0801_RAKFKQLL_BZLF1_EBV_binder',
        'B0801_ELRRKMMYM_IE-1_CMV_binder',
        'B0801_FLRGRAYGL_EBNA-3A_EBV_binder',
        'A0101_SLEGGGLGY_NC_binder',
        'A0101_STEGGGLAY_NC_binder',
        'A0201_ALIAPVHAV_NC_binder',
        'A2402_AYSSAGASI_NC_binder',
        'B0702_GPAESAAGL_NC_binder',
        'NR(B0801)_AAKGRGAAL_NC_binder',
    ]
def load_receptors(base_dir, pmhc):
    """Count paired TRA/TRB receptors bound to ``pmhc`` across the four donors.

    For each donor, the contig-annotation CSV is scanned to collect clean
    chain records per cell barcode; the binarized-matrix CSV then selects
    barcodes positive for ``pmhc``, and every TRA x TRB pairing within such a
    barcode is tallied. Returns {'va:cdr3a:ja:vb:cdr3b:jb': count}.
    """
    receptors = {}
    for subject in ['1', '2', '3', '4']:
        barcodes = {}
        contig_path = f'{base_dir}/vdj_v1_hs_aggregated_donor{subject}_all_contig_annotations.csv'
        with open(contig_path, 'r') as stream:
            for row in csv.DictReader(stream, delimiter=','):
                barcode = row['barcode']
                barcodes.setdefault(barcode, [])
                cdr3, vgene, jgene = row['cdr3'], row['v_gene'], row['j_gene']
                # Keep only clean records: no missing fields, no stop codons.
                if ('None' not in cdr3) and ('*' not in cdr3) and ('None' not in vgene) and ('None' not in jgene):
                    barcodes[barcode].append({
                        'chain': row['chain'],
                        'cdr3': cdr3,
                        'vgene': vgene,
                        'jgene': jgene,
                        'full': 'TRUE' in row['full_length'],
                    })
        matrix_path = f'{base_dir}/vdj_v1_hs_aggregated_donor{subject}_binarized_matrix.csv'
        with open(matrix_path, 'r') as stream:
            for row in csv.DictReader(stream, delimiter=','):
                if 'True' not in row[pmhc]:
                    continue
                barcode = row['barcode']
                pairings = []
                for tra in barcodes[barcode]:
                    if 'TRA' not in tra['chain']:
                        continue
                    for trb in barcodes[barcode]:
                        if 'TRB' in trb['chain']:
                            pairings.append(':'.join([tra['vgene'], tra['cdr3'], tra['jgene'],
                                                      trb['vgene'], trb['cdr3'], trb['jgene']]))
                for pairing in pairings:
                    receptors[pairing] = receptors.get(pairing, 0.0) + 1.0
    return receptors
def normalize_sample(receptors):
    """Normalize receptor counts in place to frequencies summing to 1.

    Returns the same (mutated) dict for convenience.
    """
    total_count = np.float64(0.0)
    for quantity in receptors.values():
        total_count = total_count + quantity
    for receptor in receptors.keys():
        receptors[receptor] = receptors[receptor] / total_count
    return receptors
def collapse_samples(samples, labels):
    """Merge per-sample receptor quantities into {receptor: {label: total}}.

    A warning is printed when the same receptor/label pair appears in more
    than one sample (quantities are still summed).
    """
    receptors_collapse = {}
    for receptors, label in zip(samples, labels):
        for receptor, quantity in receptors.items():
            bucket = receptors_collapse.setdefault(receptor, {})
            if label not in bucket:
                bucket[label] = quantity
            else:
                bucket[label] += quantity
                print('WARNING: Duplicate label for the same receptor')
    return receptors_collapse
def split_dataset(receptors, ratios):
    """Randomly partition a receptor dict into len(ratios) sub-dicts whose
    sizes are proportional to ``ratios`` (uses np.random.shuffle)."""
    fractions = np.array(ratios, dtype=np.float64)
    fractions = fractions / np.sum(fractions)
    boundaries = np.pad(np.cumsum(fractions), [1, 0], 'constant', constant_values=0)
    keys = list(receptors.keys())
    np.random.shuffle(keys)
    receptors_split = []
    for i in range(len(ratios)):
        # Convert the cumulative fractions into integer slice bounds.
        j1, j2 = (len(keys) * boundaries[i:(i + 2)]).astype(int)
        receptors_split.append({key: receptors[key] for key in keys[j1:j2]})
    return receptors_split
def insert_receptors(path_db, name, receptors, max_cdr3_length=32):
    """Write receptor records (with one frequency column per label) into a new
    HDF5 dataset ``name`` inside ``path_db`` (file created if absent)."""
    labels = set()
    for quantities in receptors.values():
        labels.update(quantities.keys())
    labels = sorted(list(labels))
    cdr3_dtype = 'S' + str(max_cdr3_length)
    dtype_receptor = ([
        ('tra_vgene', 'S16'), ('tra_cdr3', cdr3_dtype), ('tra_jgene', 'S16'),
        ('trb_vgene', 'S16'), ('trb_cdr3', cdr3_dtype), ('trb_jgene', 'S16'),
    ] + [('frequency_' + label, 'f8') for label in labels])
    records = np.zeros(len(receptors), dtype=dtype_receptor)
    for i, (receptor, quantities) in enumerate(receptors.items()):
        # Receptor keys are ':'-joined 6-tuples; a malformed key raises here.
        tra_vgene, tra_cdr3, tra_jgene, trb_vgene, trb_cdr3, trb_jgene = receptor.split(':')
        records[i]['tra_vgene'] = tra_vgene
        records[i]['tra_cdr3'] = tra_cdr3
        records[i]['tra_jgene'] = tra_jgene
        records[i]['trb_vgene'] = trb_vgene
        records[i]['trb_cdr3'] = trb_cdr3
        records[i]['trb_jgene'] = trb_jgene
        for label in quantities.keys():
            records[i]['frequency_' + label] = quantities[label]
    mode = 'r+' if os.path.isfile(path_db) else 'w'
    with h5py.File(path_db, mode) as db:
        dataset = db.create_dataset(name, (records.size,), dtype_receptor)
        dataset[:] = records
class Alignment(Layer):
    """Trainable alignment layer.

    Scores each masked input sequence against ``filters`` positional weight
    profiles via ``alignment_score`` (defined elsewhere in this project) and
    adds a per-filter bias; optionally normalizes scores by sqrt(length).
    """

    def __init__(self, filters, weight_steps, penalties_feature=0.0, penalties_filter=0.0,
                 length_normalize=False, kernel_initializer='uniform', bias_initializer='zeros',
                 kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None,
                 bias_constraint=None, **kwargs):
        self.filters = filters
        self.weight_steps = weight_steps
        self.penalties_feature = penalties_feature
        self.penalties_filter = penalties_filter
        self.length_normalize = length_normalize
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        super().__init__(**kwargs)

    def build(self, input_shape):
        # One (weight_steps, features) profile per filter.
        self.kernel = self.add_weight(
            name='kernel',
            shape=[self.weight_steps, int(input_shape[2]), self.filters],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
        )
        self.bias = self.add_weight(
            name='bias',
            shape=[self.filters],
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint,
            trainable=True,
        )
        super().build(input_shape)

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return mask
        return K.any(mask, axis=1)

    def call(self, inputs, mask=None):
        scores = alignment_score(inputs, mask, self.kernel,
                                 penalties_feature=self.penalties_feature,
                                 penalties_weight=self.penalties_filter)
        if self.length_normalize:
            # Normalize by sqrt(min(sequence length, profile length)).
            lengths_feature = K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
            lengths_weight = K.cast(self.weight_steps, inputs.dtype)
            scores = scores / K.sqrt(K.minimum(lengths_feature, lengths_weight))
        return scores + self.bias
class Length(Layer):
    """Emit the number of unmasked timesteps as a (batch, 1) tensor."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        if mask is None:
            return mask
        return K.any(mask, axis=1)

    def call(self, inputs, mask=None):
        # Count True mask entries along the time axis.
        return K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
class NormalizeInitialization(Layer):
    """Standardize inputs using weighted moments captured on the FIRST call
    (data-dependent initialization); later calls reuse the stored mean and
    variance unchanged."""

    def __init__(self, epsilon=1e-05, **kwargs):
        self.epsilon = epsilon
        super().__init__(**kwargs)

    def build(self, input_shape):
        (input_shape, _) = input_shape
        # Non-trainable state: call counter plus the frozen moments.
        self.counter = self.add_weight(name='counter', shape=[1], initializer=Zeros(), trainable=False)
        self.mean = self.add_weight(name='mean', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        self.variance = self.add_weight(name='variance', shape=input_shape[1:], initializer=Ones(), trainable=False)
        super().build(input_shape)

    def compute_mask(self, inputs, mask=None):
        return None

    def call(self, inputs):
        (inputs, weights) = inputs
        weights = weights / tf.reduce_sum(weights)
        mean, variance = tf.nn.weighted_moments(inputs, [0], tf.expand_dims(weights, axis=1))
        counter = K.update_add(self.counter, K.ones_like(self.counter))
        # init is 0 on the very first call and 1 afterwards, which freezes
        # the stored moments after initialization.
        init = K.sign(counter - K.ones_like(counter))
        mean = K.update(self.mean, (init * self.mean) + ((1.0 - init) * mean))
        variance = K.update(self.variance, (init * self.variance) + ((1.0 - init) * variance))
        normalized = (inputs - tf.expand_dims(mean, axis=0)) / tf.sqrt(tf.expand_dims(variance, axis=0) + self.epsilon)
        return normalized
def load_similarity_matrix(filename):
    """Load a CSV similarity matrix as {column_name: [float per row]}.

    The first (unnamed) CSV column — typically the row index — is skipped.
    Fix: the file is now opened in a ``with`` block; the original leaked the
    file handle (``csv.DictReader(open(...))`` was never closed).
    """
    similarity_matrix = {}
    with open(filename, 'r') as stream:
        reader = csv.DictReader(stream)
        entries = [row for row in reader]
        for k in reader.fieldnames:
            if len(k) < 1:
                continue  # skip the unnamed index column
            similarity_matrix[k] = [float(obj[k]) for obj in entries]
    return similarity_matrix
def print_matrix(m, cdr3):
    """Pretty-print the 33-row alignment score matrix, with the cdr3
    characters as column headers."""
    max_col = len(cdr3)
    # Header row: blank corner cell followed by one cell per cdr3 character.
    print(' %11s' % '', end='')
    for col in range(max_col):
        print(' %11s' % cdr3[col], end='')
    print('')
    for row in range(33):
        for col in range(max_col + 1):
            print(' %11.4f' % m[row][col], end='')
        print('')
def print_bp(bp, cdr3):
    """Pretty-print the 33-row backpointer matrix, with the cdr3 characters
    as column headers."""
    max_col = len(cdr3)
    # Header row mirrors print_matrix.
    print(' %11s' % '', end='')
    for col in range(max_col):
        print(' %11s' % cdr3[col], end='')
    print('')
    for row in range(33):
        for col in range(max_col + 1):
            print(' %11s' % bp[row][col], end='')
        print('')
def print_alignment(bp, cdr3):
    """Trace the backpointer matrix from (32, len(cdr3)) back to the origin
    and return the aligned cdr3 string ('.' marks gaps)."""
    cdr3_align = []
    theta_align = []
    row, col = 32, len(cdr3)
    while True:
        move = bp[row][col]
        if move == 'diag':
            theta_align.append(row)
            cdr3_align.append(cdr3[col - 1])
            row -= 1
            col -= 1
        elif move == 'up':
            theta_align.append(row)
            cdr3_align.append('.')
            row -= 1
        elif move == 'left':
            theta_align.append('.')
            cdr3_align.append(cdr3[col - 1])
            col -= 1
        else:
            print('ERROR')
        if row <= 0 or col <= 0:
            break
    # Pad out any profile rows not reached by the traceback.
    if row != 0:
        for i in range(row, 0, -1):
            theta_align.append(i)
            cdr3_align.append('.')
    return ''.join(reversed(cdr3_align))
def do_alignment(sm, cdr3): theta_gap = 0 cdr3_gap = (- 1000) am = [] bp = [] for row in range(0, 33): am.append([0.0 for col in range(0, 33)]) bp.append([None for col in range(0, 33)]) max_col = (len(cdr3) + 1) score = 0 for row in range(0, 33): am[row][0] = score score += theta_gap score = 0 for col in range(0, max_col): am[0][col] = score score += cdr3_gap for col in range(1, max_col): cdr3_pos = (col - 1) for row in range(1, 33): theta_pos = (row - 1) up = (am[(row - 1)][col] + theta_gap) diag = (am[(row - 1)][(col - 1)] + sm[cdr3[cdr3_pos]][theta_pos]) left = (am[row][(col - 1)] + cdr3_gap) if (up > diag): if (up > left): am[row][col] = up bp[row][col] = 'up' else: am[row][col] = left bp[row][col] = 'left' elif (diag > left): am[row][col] = diag bp[row][col] = 'diag' else: am[row][col] = left bp[row][col] = 'left' return [am, bp]
def do_file_alignment(input, output, sm_tra, sm_trb, tag):
    """Align each row's TRA/TRB CDR3 against the given similarity profiles.

    Reads the ``input`` CSV, appends four columns suffixed with ``tag``
    (alignment string and sqrt-length-normalized score for each chain) and
    writes the augmented rows to ``output``.

    Fix: both files are now opened in a ``with`` block; the original passed
    ``open(...)`` inline and never closed either handle, risking truncated
    output on interpreter exit.
    """
    r = 32  # scores are read from the bottom row of the 33-row matrix
    with open(input, 'r') as in_stream, open(output, 'w') as out_stream:
        reader = csv.DictReader(in_stream)
        fieldnames = reader.fieldnames.copy()
        fieldnames.append('tra_alignment_' + tag)
        fieldnames.append('tra_score_' + tag)
        fieldnames.append('trb_alignment_' + tag)
        fieldnames.append('trb_score_' + tag)
        writer = csv.DictWriter(out_stream, fieldnames=fieldnames)
        writer.writeheader()
        for row in reader:
            col = len(row['tra_cdr3'])
            tra_align = do_alignment(sm_tra, row['tra_cdr3'])
            row['tra_alignment_' + tag] = print_alignment(tra_align[1], row['tra_cdr3'])
            row['tra_score_' + tag] = tra_align[0][r][col] / math.sqrt(float(col))
            col = len(row['trb_cdr3'])
            trb_align = do_alignment(sm_trb, row['trb_cdr3'])
            row['trb_alignment_' + tag] = print_alignment(trb_align[1], row['trb_cdr3'])
            row['trb_score_' + tag] = trb_align[0][r][col] / math.sqrt(float(col))
            writer.writerow(row)
def test_alignment(sm, cdr3):
    """Debug helper: align ``cdr3`` against ``sm`` and dump the score matrix,
    the backpointers and the resulting alignment string."""
    am, bp = do_alignment(sm, cdr3)
    print_matrix(am, cdr3)
    print_bp(bp, cdr3)
    print(print_alignment(bp, cdr3))
class GlobalPoolWithMask(Layer):
    """Global max-pool over the time axis that ignores masked positions."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        return tf.reduce_any(mask, axis=1)

    def call(self, inputs, mask=None):
        keep = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        # Push masked positions to -1e16 so the max never selects them.
        return tf.reduce_max(inputs + (-1e+16) * (1.0 - keep), axis=1)
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene,
                   input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene,
                   num_outputs):
    """Build the single-Conv1D TRA/TRB model: per-feature logit branches are
    initialization-normalized against the sample weights and summed."""
    kmer_size = 4
    # Raw inputs (sequence features plus per-sample weights).
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])
    # --- TRA branch ---
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Conv1D(num_outputs, kmer_size)(features_tra_cdr3)
    logits_tra_cdr3_mask = MaskCopy(trim_front=kmer_size - 1)([logits_tra_cdr3, features_tra_mask])
    logits_tra_cdr3_pool = GlobalPoolWithMask()(logits_tra_cdr3_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3_pool, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # --- TRB branch ---
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Conv1D(num_outputs, kmer_size)(features_trb_cdr3)
    logits_trb_cdr3_mask = MaskCopy(trim_front=kmer_size - 1)([logits_trb_cdr3, features_trb_mask])
    logits_trb_cdr3_pool = GlobalPoolWithMask()(logits_trb_cdr3_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3_pool, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    # Combine all normalized branches into a single normalized logit tensor.
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm,
                    logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm,
                    logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    return Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene,
                         features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights],
                 outputs=logits_norm)
class GlobalPoolWithMask(Layer):
    """Masked global max-pooling along the time dimension."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        # A sample survives if at least one timestep is unmasked.
        return tf.reduce_any(mask, axis=1)

    def call(self, inputs, mask=None):
        indicators = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        penalties = (-1e+16) * (1.0 - indicators)
        # Masked positions receive a huge negative penalty before the max.
        return tf.reduce_max(inputs + penalties, axis=1)
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene,
                   input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene,
                   num_outputs):
    """Build the two-layer-Conv1D TRA/TRB model; all logit branches are
    initialization-normalized against the sample weights and summed.

    Fix: the TRB MaskCopy previously received ``features_tra_mask`` (a
    copy-paste slip — the single-Conv sibling model uses the TRB mask);
    it now receives ``features_trb_mask``.
    """
    kmer_size = 4
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])
    # --- TRA branch ---
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Conv1D(8, kmer_size)(features_tra_cdr3)
    logits_tra_cdr3 = Conv1D(num_outputs, kmer_size)(logits_tra_cdr3)
    # Two valid convolutions shorten the sequence by 2*(kmer_size-1) steps.
    logits_tra_cdr3_mask = MaskCopy(trim_front=((2 * kmer_size) - 2))([logits_tra_cdr3, features_tra_mask])
    logits_tra_cdr3_pool = GlobalPoolWithMask()(logits_tra_cdr3_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3_pool, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # --- TRB branch ---
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Conv1D(8, kmer_size)(features_trb_cdr3)
    logits_trb_cdr3 = Conv1D(num_outputs, kmer_size)(logits_trb_cdr3)
    # Fixed: was features_tra_mask.
    logits_trb_cdr3_mask = MaskCopy(trim_front=((2 * kmer_size) - 2))([logits_trb_cdr3, features_trb_mask])
    logits_trb_cdr3_pool = GlobalPoolWithMask()(logits_trb_cdr3_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3_pool, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm,
                    logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm,
                    logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene,
                          features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights],
                  outputs=logits_norm)
    return model
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene,
                   input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene,
                   num_outputs, num_steps):
    """Build an alignment-based classifier over paired TCR alpha/beta chains.

    Per chain: the masked CDR3 sequence is decomposed into k-mers and scored
    by an Alignment layer; dense branches handle CDR3 length, V gene and
    J gene.  The eight normalized branch logits are summed and normalized
    once more before being returned as the model output.
    """
    kmer_size = 5
    in_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    in_tra_vgene = Input(shape=input_shape_tra_vgene)
    in_tra_jgene = Input(shape=input_shape_tra_jgene)
    in_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    in_trb_vgene = Input(shape=input_shape_trb_vgene)
    in_trb_jgene = Input(shape=input_shape_trb_jgene)
    sample_weights = Input(shape=[])

    def _norm(tensor):
        # Weighted normalization shared by every branch.
        return NormalizeInitialization(epsilon=0.0)([tensor, sample_weights])

    branch_logits = []
    # Identical branch topology for the alpha and beta chains; layer
    # construction order matches the original (tra fully, then trb).
    for in_cdr3, in_vgene, in_jgene in ((in_tra_cdr3, in_tra_vgene, in_tra_jgene),
                                        (in_trb_cdr3, in_trb_vgene, in_trb_jgene)):
        masked = Masking(mask_value=0.0)(in_cdr3)
        seq_length = Length()(masked)
        kmers = KMer(kmer_size)(masked)
        aligned = Alignment(num_outputs, ((num_steps - kmer_size) + 1),
                            penalties_feature=(- 1e+16), penalties_filter=0.0,
                            length_normalize=True)(kmers)
        branch_logits.append(_norm(aligned))
        branch_logits.append(_norm(Dense(num_outputs)(seq_length)))
        branch_logits.append(_norm(Dense(num_outputs)(in_vgene)))
        branch_logits.append(_norm(Dense(num_outputs)(in_jgene)))

    combined = _norm(Add()(branch_logits))
    return Model(inputs=[in_tra_cdr3, in_tra_vgene, in_tra_jgene,
                         in_trb_cdr3, in_trb_vgene, in_trb_jgene,
                         sample_weights],
                 outputs=combined)
def handcrafted_features(data, tags):
    """Expand paired-chain TCR records into a flat table of handcrafted features.

    For each chain ('tra', 'trb') this builds: one-hot encodings of the V/J
    genes, CDR3 length, amino-acid composition counts, sequence-averaged
    physicochemical scores, isoelectric point, mass, and per-position
    physicochemical features indexed relative to the CDR3 center.  The
    'weights', per-tag 'labels_*' and 'split' columns of `data` pass through
    unchanged.  Column order equals append order (pd.concat, axis=1).

    NOTE(review): depends on module-level `feature_extraction` (sklearn-style
    DictVectorizer) and `parser`/`electrochem`/`mass` helpers (pyteomics-style
    API assumed from usage) -- confirm against the file's imports.
    """
    # Per-residue physicochemical lookup tables, keyed by amino-acid letter.
    basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9}
    hydrophobicity = {'A': 0.16, 'B': (- 3.14), 'C': 2.5, 'D': (- 2.49), 'E': (- 1.5), 'F': 5.0, 'G': (- 3.31), 'H': (- 4.63), 'I': 4.41, 'K': (- 5.0), 'L': 4.76, 'M': 3.23, 'N': (- 3.79), 'P': (- 4.92), 'Q': (- 2.76), 'R': (- 2.77), 'S': (- 2.85), 'T': (- 1.08), 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.0, 'Z': (- 2.13)}
    helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.0, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91}
    # NOTE(review): mutation_stability has no 'B'/'X'/'Z' entries; a sequence
    # containing those ambiguity codes would raise KeyError below.
    mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31}
    features_list = []
    for chain in ['tra', 'trb']:
        # One-hot encode this chain's V/J gene pair.
        onehot_encoder = feature_extraction.DictVectorizer(sparse=False)
        features_list.append(pd.DataFrame(onehot_encoder.fit_transform(data[[(chain + '_vgene'), (chain + '_jgene')]].to_dict(orient='records')), columns=onehot_encoder.feature_names_))
        # CDR3 length.  NOTE(review): renamed columns ('length', 'avg_*',
        # 'pI', 'mass') are not chain-prefixed, so tra and trb contribute
        # duplicate column names to the concatenated frame.
        features_list.append(data[(chain + '_cdr3')].apply((lambda sequence: parser.length(sequence))).to_frame().rename(columns={(chain + '_cdr3'): 'length'}))
        # Amino-acid composition counts; residues absent from a sequence -> 0.
        aa_counts = pd.DataFrame.from_records([parser.amino_acid_composition(sequence) for sequence in data[(chain + '_cdr3')]]).fillna(0)
        aa_counts.columns = [(chain + '_count_{}'.format(column)) for column in aa_counts.columns]
        features_list.append(aa_counts)
        # Sequence-averaged physicochemical properties.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([basicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_basicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_hydrophobicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([helicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_helicity'}))
        # Whole-sequence isoelectric point.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: electrochem.pI(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'pI'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_mutation_stability'}))
        # Peptide mass.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: mass.fast_mass(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'mass'}))
        # Per-position features: positions are numbered outward from the CDR3
        # center (negative before the center, positive after; even-length
        # sequences skip position 0).
        (pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation) = [[] for _ in range(6)]
        for sequence in data[(chain + '_cdr3')]:
            length = parser.length(sequence)
            start_pos = ((- 1) * (length // 2))
            pos_range = (list(range(start_pos, (start_pos + length))) if ((length % 2) == 1) else (list(range(start_pos, 0)) + list(range(1, ((start_pos + length) + 1)))))
            pos_aa.append({(chain + '_pos_{}_{}'.format(pos, aa)): 1 for (pos, aa) in zip(pos_range, sequence)})
            pos_basicity.append({(chain + '_pos_{}_basicity'.format(pos)): basicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_hydro.append({(chain + '_pos_{}_hydrophobicity'.format(pos)): hydrophobicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_helicity.append({(chain + '_pos_{}_helicity'.format(pos)): helicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_pI.append({(chain + '_pos_{}_pI'.format(pos)): electrochem.pI(aa) for (pos, aa) in zip(pos_range, sequence)})
            pos_mutation.append({(chain + '_pos_{}_mutation_stability'.format(pos)): mutation_stability[aa] for (pos, aa) in zip(pos_range, sequence)})
        # Positions absent from shorter sequences become 0 after fillna.
        features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0))
    # Pass-through columns used downstream for training/splitting.
    features_list.append(data['weights'])
    for tag in tags:
        features_list.append(data[('labels_' + tag)])
    features_list.append(data['split'])
    data_processed = pd.concat(features_list, axis=1)
    return data_processed
def load_datasets(path_db, splits, tags, uniform=False, permute=False):
    """Load receptor splits from an HDF5 database and featurize them.

    For every split in `splits`: reads the records, drops receptors whose
    total frequency over `tags` is zero, optionally binarizes frequencies
    (`uniform=True`), normalizes each tag's frequencies to sum to one, and
    accumulates everything into one table expanded by `handcrafted_features`.

    Returns a flat list with three entries per split, in `splits` order:
    feature matrix `xs`, soft-label matrix `ys`, sample weights `ws`.

    NOTE(review): with `permute=True` only the `xs` rows are shuffled --
    labels and weights keep their order, decoupling features from labels
    (looks like a deliberate permutation-null control; confirm intent).
    """
    num_categories = len(tags)
    receptors_dict = {}
    for split in splits:
        with h5py.File(path_db, 'r') as db:
            receptors = db[split][...]
        # Keep only receptors observed under at least one tag.
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        indices = np.argwhere(weights > 0.0).flatten()
        receptors = receptors[indices]
        if uniform:
            # Binarize so every observed receptor counts equally per tag.
            for tag in tags:
                receptors[('frequency_' + tag)] = np.sign(receptors[('frequency_' + tag)])
        # Normalize each tag's frequencies into a probability distribution.
        for tag in tags:
            receptors[('frequency_' + tag)] /= np.sum(receptors[('frequency_' + tag)])
        if ('tra_vgene' not in receptors_dict):
            # First split: initialize every column.
            for field in ('tra_vgene', 'tra_cdr3', 'tra_jgene',
                          'trb_vgene', 'trb_cdr3', 'trb_jgene'):
                receptors_dict[field] = np.char.decode(receptors[field])
            # Per-receptor weight: mean frequency across tags.
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = weights
            # Soft labels: each tag's share of the receptor's total frequency.
            for tag in tags:
                receptors_dict[('labels_' + tag)] = (receptors[('frequency_' + tag)] / (num_categories * weights))
            receptors_dict['split'] = [split] * receptors.size
        else:
            # Subsequent splits: append to the existing columns.
            for field in ('tra_vgene', 'tra_cdr3', 'tra_jgene',
                          'trb_vgene', 'trb_cdr3', 'trb_jgene'):
                receptors_dict[field] = np.concatenate([receptors_dict[field], np.char.decode(receptors[field])], axis=0)
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = np.concatenate([receptors_dict['weights'], weights], axis=0)
            for tag in tags:
                receptors_dict[('labels_' + tag)] = np.concatenate([receptors_dict[('labels_' + tag)], (receptors[('frequency_' + tag)] / (num_categories * weights))], axis=0)
            receptors_dict['split'] = np.concatenate([receptors_dict['split'], [split] * receptors.size], axis=0)
    data = pd.DataFrame(receptors_dict)
    data_processed = handcrafted_features(data, tags)
    outputs_list = []
    for split in splits:
        data_split = data_processed[data_processed['split'] == split]
        # (Removed a no-op `data_split.drop('split', axis=1)` whose result was
        # discarded; 'split' is dropped from the feature matrix below anyway.)
        features_split = data_split.drop((['weights', 'split'] + [('labels_' + tag) for tag in tags]), axis=1)
        labels_split = data_split[[('labels_' + tag) for tag in tags]]
        weights_split = data_split['weights']
        xs_split = features_split.to_numpy()
        ys_split = labels_split.to_numpy()
        ws_split = weights_split.to_numpy()
        if permute:
            # Shuffle feature rows only (see docstring note).
            indices = np.arange(xs_split.shape[0])
            np.random.shuffle(indices)
            xs_split = xs_split[indices]
        outputs_list.append(xs_split)
        outputs_list.append(ys_split)
        outputs_list.append(ws_split)
    return outputs_list
def balanced_sampling(xs, ys, ws, batch_size):
    """Infinite generator of weight-balanced mini-batches.

    Each step samples `batch_size` row indices with replacement, with
    probability proportional to `ws`, and yields the corresponding
    (features, labels) pair.  The weights themselves are not yielded.
    """
    candidate_indices = np.arange(xs.shape[0])
    probabilities = ws / np.sum(ws)
    while True:
        picks = np.random.choice(candidate_indices, size=batch_size, p=probabilities)
        yield xs[picks], ys[picks]
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene,
                   input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene,
                   num_outputs):
    """Build a GRU-based classifier over paired TCR alpha/beta chains.

    Per chain: a GRU over the masked CDR3 sequence plus dense branches for
    CDR3 length, V gene and J gene.  The eight normalized branch logits are
    summed and normalized once more before being returned as the output.
    """
    in_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    in_tra_vgene = Input(shape=input_shape_tra_vgene)
    in_tra_jgene = Input(shape=input_shape_tra_jgene)
    in_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    in_trb_vgene = Input(shape=input_shape_trb_vgene)
    in_trb_jgene = Input(shape=input_shape_trb_jgene)
    sample_weights = Input(shape=[])

    def _norm(tensor):
        # Weighted normalization shared by every branch.
        return NormalizeInitialization(epsilon=0.0)([tensor, sample_weights])

    branch_logits = []
    # Identical branch topology for the alpha and beta chains; layer
    # construction order matches the original (tra fully, then trb).
    for in_cdr3, in_vgene, in_jgene in ((in_tra_cdr3, in_tra_vgene, in_tra_jgene),
                                        (in_trb_cdr3, in_trb_vgene, in_trb_jgene)):
        masked = Masking(mask_value=0.0)(in_cdr3)
        seq_length = Length()(masked)
        branch_logits.append(_norm(GRU(num_outputs)(masked)))
        branch_logits.append(_norm(Dense(num_outputs)(seq_length)))
        branch_logits.append(_norm(Dense(num_outputs)(in_vgene)))
        branch_logits.append(_norm(Dense(num_outputs)(in_jgene)))

    combined = _norm(Add()(branch_logits))
    return Model(inputs=[in_tra_cdr3, in_tra_vgene, in_tra_jgene,
                         in_trb_cdr3, in_trb_vgene, in_trb_jgene,
                         sample_weights],
                 outputs=combined)
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene,
                   input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene,
                   num_outputs):
    """Build an LSTM-based classifier over paired TCR alpha/beta chains.

    Per chain: an LSTM over the masked CDR3 sequence plus dense branches for
    CDR3 length, V gene and J gene.  The eight normalized branch logits are
    summed and normalized once more before being returned as the output.
    """
    in_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    in_tra_vgene = Input(shape=input_shape_tra_vgene)
    in_tra_jgene = Input(shape=input_shape_tra_jgene)
    in_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    in_trb_vgene = Input(shape=input_shape_trb_vgene)
    in_trb_jgene = Input(shape=input_shape_trb_jgene)
    sample_weights = Input(shape=[])

    def _norm(tensor):
        # Weighted normalization shared by every branch.
        return NormalizeInitialization(epsilon=0.0)([tensor, sample_weights])

    branch_logits = []
    # Identical branch topology for the alpha and beta chains; layer
    # construction order matches the original (tra fully, then trb).
    for in_cdr3, in_vgene, in_jgene in ((in_tra_cdr3, in_tra_vgene, in_tra_jgene),
                                        (in_trb_cdr3, in_trb_vgene, in_trb_jgene)):
        masked = Masking(mask_value=0.0)(in_cdr3)
        seq_length = Length()(masked)
        branch_logits.append(_norm(LSTM(num_outputs)(masked)))
        branch_logits.append(_norm(Dense(num_outputs)(seq_length)))
        branch_logits.append(_norm(Dense(num_outputs)(in_vgene)))
        branch_logits.append(_norm(Dense(num_outputs)(in_jgene)))

    combined = _norm(Add()(branch_logits))
    return Model(inputs=[in_tra_cdr3, in_tra_vgene, in_tra_jgene,
                         in_trb_cdr3, in_trb_vgene, in_trb_jgene,
                         sample_weights],
                 outputs=combined)
def handcrafted_features(data, tags):
    """Expand paired-chain TCR records into a flat table of handcrafted features.

    For each chain ('tra', 'trb') this builds: one-hot encodings of the V/J
    genes, CDR3 length, amino-acid composition counts, sequence-averaged
    physicochemical scores, isoelectric point, mass, and per-position
    physicochemical features indexed relative to the CDR3 center.  The
    'weights', per-tag 'labels_*' and 'split' columns of `data` pass through
    unchanged.  Column order equals append order (pd.concat, axis=1).

    NOTE(review): depends on module-level `feature_extraction` (sklearn-style
    DictVectorizer) and `parser`/`electrochem`/`mass` helpers (pyteomics-style
    API assumed from usage) -- confirm against the file's imports.
    """
    # Per-residue physicochemical lookup tables, keyed by amino-acid letter.
    basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9}
    hydrophobicity = {'A': 0.16, 'B': (- 3.14), 'C': 2.5, 'D': (- 2.49), 'E': (- 1.5), 'F': 5.0, 'G': (- 3.31), 'H': (- 4.63), 'I': 4.41, 'K': (- 5.0), 'L': 4.76, 'M': 3.23, 'N': (- 3.79), 'P': (- 4.92), 'Q': (- 2.76), 'R': (- 2.77), 'S': (- 2.85), 'T': (- 1.08), 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.0, 'Z': (- 2.13)}
    helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.0, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91}
    # NOTE(review): mutation_stability has no 'B'/'X'/'Z' entries; a sequence
    # containing those ambiguity codes would raise KeyError below.
    mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31}
    features_list = []
    for chain in ['tra', 'trb']:
        # One-hot encode this chain's V/J gene pair.
        onehot_encoder = feature_extraction.DictVectorizer(sparse=False)
        features_list.append(pd.DataFrame(onehot_encoder.fit_transform(data[[(chain + '_vgene'), (chain + '_jgene')]].to_dict(orient='records')), columns=onehot_encoder.feature_names_))
        # CDR3 length.  NOTE(review): renamed columns ('length', 'avg_*',
        # 'pI', 'mass') are not chain-prefixed, so tra and trb contribute
        # duplicate column names to the concatenated frame.
        features_list.append(data[(chain + '_cdr3')].apply((lambda sequence: parser.length(sequence))).to_frame().rename(columns={(chain + '_cdr3'): 'length'}))
        # Amino-acid composition counts; residues absent from a sequence -> 0.
        aa_counts = pd.DataFrame.from_records([parser.amino_acid_composition(sequence) for sequence in data[(chain + '_cdr3')]]).fillna(0)
        aa_counts.columns = [(chain + '_count_{}'.format(column)) for column in aa_counts.columns]
        features_list.append(aa_counts)
        # Sequence-averaged physicochemical properties.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([basicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_basicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_hydrophobicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([helicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_helicity'}))
        # Whole-sequence isoelectric point.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: electrochem.pI(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'pI'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_mutation_stability'}))
        # Peptide mass.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: mass.fast_mass(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'mass'}))
        # Per-position features: positions are numbered outward from the CDR3
        # center (negative before the center, positive after; even-length
        # sequences skip position 0).
        (pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation) = [[] for _ in range(6)]
        for sequence in data[(chain + '_cdr3')]:
            length = parser.length(sequence)
            start_pos = ((- 1) * (length // 2))
            pos_range = (list(range(start_pos, (start_pos + length))) if ((length % 2) == 1) else (list(range(start_pos, 0)) + list(range(1, ((start_pos + length) + 1)))))
            pos_aa.append({(chain + '_pos_{}_{}'.format(pos, aa)): 1 for (pos, aa) in zip(pos_range, sequence)})
            pos_basicity.append({(chain + '_pos_{}_basicity'.format(pos)): basicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_hydro.append({(chain + '_pos_{}_hydrophobicity'.format(pos)): hydrophobicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_helicity.append({(chain + '_pos_{}_helicity'.format(pos)): helicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_pI.append({(chain + '_pos_{}_pI'.format(pos)): electrochem.pI(aa) for (pos, aa) in zip(pos_range, sequence)})
            pos_mutation.append({(chain + '_pos_{}_mutation_stability'.format(pos)): mutation_stability[aa] for (pos, aa) in zip(pos_range, sequence)})
        # Positions absent from shorter sequences become 0 after fillna.
        features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0))
    # Pass-through columns used downstream for training/splitting.
    features_list.append(data['weights'])
    for tag in tags:
        features_list.append(data[('labels_' + tag)])
    features_list.append(data['split'])
    data_processed = pd.concat(features_list, axis=1)
    return data_processed
def load_datasets(path_db, splits, tags, uniform=False, permute=False):
    """Load receptor splits from an HDF5 database and featurize them.

    For every split in `splits`: reads the records, drops receptors whose
    total frequency over `tags` is zero, optionally binarizes frequencies
    (`uniform=True`), normalizes each tag's frequencies to sum to one, and
    accumulates everything into one table expanded by `handcrafted_features`.

    Returns a flat list with three entries per split, in `splits` order:
    feature matrix `xs`, soft-label matrix `ys`, sample weights `ws`.

    NOTE(review): with `permute=True` only the `xs` rows are shuffled --
    labels and weights keep their order, decoupling features from labels
    (looks like a deliberate permutation-null control; confirm intent).
    """
    num_categories = len(tags)
    receptors_dict = {}
    for split in splits:
        with h5py.File(path_db, 'r') as db:
            receptors = db[split][...]
        # Keep only receptors observed under at least one tag.
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        indices = np.argwhere(weights > 0.0).flatten()
        receptors = receptors[indices]
        if uniform:
            # Binarize so every observed receptor counts equally per tag.
            for tag in tags:
                receptors[('frequency_' + tag)] = np.sign(receptors[('frequency_' + tag)])
        # Normalize each tag's frequencies into a probability distribution.
        for tag in tags:
            receptors[('frequency_' + tag)] /= np.sum(receptors[('frequency_' + tag)])
        if ('tra_vgene' not in receptors_dict):
            # First split: initialize every column.
            for field in ('tra_vgene', 'tra_cdr3', 'tra_jgene',
                          'trb_vgene', 'trb_cdr3', 'trb_jgene'):
                receptors_dict[field] = np.char.decode(receptors[field])
            # Per-receptor weight: mean frequency across tags.
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = weights
            # Soft labels: each tag's share of the receptor's total frequency.
            for tag in tags:
                receptors_dict[('labels_' + tag)] = (receptors[('frequency_' + tag)] / (num_categories * weights))
            receptors_dict['split'] = [split] * receptors.size
        else:
            # Subsequent splits: append to the existing columns.
            for field in ('tra_vgene', 'tra_cdr3', 'tra_jgene',
                          'trb_vgene', 'trb_cdr3', 'trb_jgene'):
                receptors_dict[field] = np.concatenate([receptors_dict[field], np.char.decode(receptors[field])], axis=0)
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = np.concatenate([receptors_dict['weights'], weights], axis=0)
            for tag in tags:
                receptors_dict[('labels_' + tag)] = np.concatenate([receptors_dict[('labels_' + tag)], (receptors[('frequency_' + tag)] / (num_categories * weights))], axis=0)
            receptors_dict['split'] = np.concatenate([receptors_dict['split'], [split] * receptors.size], axis=0)
    data = pd.DataFrame(receptors_dict)
    data_processed = handcrafted_features(data, tags)
    outputs_list = []
    for split in splits:
        data_split = data_processed[data_processed['split'] == split]
        # (Removed a no-op `data_split.drop('split', axis=1)` whose result was
        # discarded; 'split' is dropped from the feature matrix below anyway.)
        features_split = data_split.drop((['weights', 'split'] + [('labels_' + tag) for tag in tags]), axis=1)
        labels_split = data_split[[('labels_' + tag) for tag in tags]]
        weights_split = data_split['weights']
        xs_split = features_split.to_numpy()
        ys_split = labels_split.to_numpy()
        ws_split = weights_split.to_numpy()
        if permute:
            # Shuffle feature rows only (see docstring note).
            indices = np.arange(xs_split.shape[0])
            np.random.shuffle(indices)
            xs_split = xs_split[indices]
        outputs_list.append(xs_split)
        outputs_list.append(ys_split)
        outputs_list.append(ws_split)
    return outputs_list
def balanced_sampling(xs, ys, ws, batch_size):
    """Infinite generator of weight-balanced mini-batches.

    Each step samples `batch_size` row indices with replacement, with
    probability proportional to `ws`, and yields the corresponding
    (features, labels) pair.  The weights themselves are not yielded.
    """
    candidate_indices = np.arange(xs.shape[0])
    probabilities = ws / np.sum(ws)
    while True:
        picks = np.random.choice(candidate_indices, size=batch_size, p=probabilities)
        yield xs[picks], ys[picks]
def handcrafted_features(data, tags):
    """Expand paired-chain TCR records into a flat table of handcrafted features.

    For each chain ('tra', 'trb') this builds: one-hot encodings of the V/J
    genes, CDR3 length, amino-acid composition counts, sequence-averaged
    physicochemical scores, isoelectric point, mass, and per-position
    physicochemical features indexed relative to the CDR3 center.  The
    'weights', per-tag 'labels_*' and 'split' columns of `data` pass through
    unchanged.  Column order equals append order (pd.concat, axis=1).

    NOTE(review): depends on module-level `feature_extraction` (sklearn-style
    DictVectorizer) and `parser`/`electrochem`/`mass` helpers (pyteomics-style
    API assumed from usage) -- confirm against the file's imports.
    """
    # Per-residue physicochemical lookup tables, keyed by amino-acid letter.
    basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9}
    hydrophobicity = {'A': 0.16, 'B': (- 3.14), 'C': 2.5, 'D': (- 2.49), 'E': (- 1.5), 'F': 5.0, 'G': (- 3.31), 'H': (- 4.63), 'I': 4.41, 'K': (- 5.0), 'L': 4.76, 'M': 3.23, 'N': (- 3.79), 'P': (- 4.92), 'Q': (- 2.76), 'R': (- 2.77), 'S': (- 2.85), 'T': (- 1.08), 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.0, 'Z': (- 2.13)}
    helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.0, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91}
    # NOTE(review): mutation_stability has no 'B'/'X'/'Z' entries; a sequence
    # containing those ambiguity codes would raise KeyError below.
    mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31}
    features_list = []
    for chain in ['tra', 'trb']:
        # One-hot encode this chain's V/J gene pair.
        onehot_encoder = feature_extraction.DictVectorizer(sparse=False)
        features_list.append(pd.DataFrame(onehot_encoder.fit_transform(data[[(chain + '_vgene'), (chain + '_jgene')]].to_dict(orient='records')), columns=onehot_encoder.feature_names_))
        # CDR3 length.  NOTE(review): renamed columns ('length', 'avg_*',
        # 'pI', 'mass') are not chain-prefixed, so tra and trb contribute
        # duplicate column names to the concatenated frame.
        features_list.append(data[(chain + '_cdr3')].apply((lambda sequence: parser.length(sequence))).to_frame().rename(columns={(chain + '_cdr3'): 'length'}))
        # Amino-acid composition counts; residues absent from a sequence -> 0.
        aa_counts = pd.DataFrame.from_records([parser.amino_acid_composition(sequence) for sequence in data[(chain + '_cdr3')]]).fillna(0)
        aa_counts.columns = [(chain + '_count_{}'.format(column)) for column in aa_counts.columns]
        features_list.append(aa_counts)
        # Sequence-averaged physicochemical properties.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([basicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_basicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_hydrophobicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([helicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_helicity'}))
        # Whole-sequence isoelectric point.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: electrochem.pI(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'pI'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_mutation_stability'}))
        # Peptide mass.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: mass.fast_mass(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'mass'}))
        # Per-position features: positions are numbered outward from the CDR3
        # center (negative before the center, positive after; even-length
        # sequences skip position 0).
        (pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation) = [[] for _ in range(6)]
        for sequence in data[(chain + '_cdr3')]:
            length = parser.length(sequence)
            start_pos = ((- 1) * (length // 2))
            pos_range = (list(range(start_pos, (start_pos + length))) if ((length % 2) == 1) else (list(range(start_pos, 0)) + list(range(1, ((start_pos + length) + 1)))))
            pos_aa.append({(chain + '_pos_{}_{}'.format(pos, aa)): 1 for (pos, aa) in zip(pos_range, sequence)})
            pos_basicity.append({(chain + '_pos_{}_basicity'.format(pos)): basicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_hydro.append({(chain + '_pos_{}_hydrophobicity'.format(pos)): hydrophobicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_helicity.append({(chain + '_pos_{}_helicity'.format(pos)): helicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_pI.append({(chain + '_pos_{}_pI'.format(pos)): electrochem.pI(aa) for (pos, aa) in zip(pos_range, sequence)})
            pos_mutation.append({(chain + '_pos_{}_mutation_stability'.format(pos)): mutation_stability[aa] for (pos, aa) in zip(pos_range, sequence)})
        # Positions absent from shorter sequences become 0 after fillna.
        features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0))
    # Pass-through columns used downstream for training/splitting.
    features_list.append(data['weights'])
    for tag in tags:
        features_list.append(data[('labels_' + tag)])
    features_list.append(data['split'])
    data_processed = pd.concat(features_list, axis=1)
    return data_processed
def load_datasets(path_db, splits, tags, uniform=False, permute=False):
    """Load receptor splits from an HDF5 database and featurize them.

    For every split in `splits`: reads the records, drops receptors whose
    total frequency over `tags` is zero, optionally binarizes frequencies
    (`uniform=True`), normalizes each tag's frequencies to sum to one, and
    accumulates everything into one table expanded by `handcrafted_features`.

    Returns a flat list with three entries per split, in `splits` order:
    feature matrix `xs`, soft-label matrix `ys`, sample weights `ws`.

    NOTE(review): with `permute=True` only the `xs` rows are shuffled --
    labels and weights keep their order, decoupling features from labels
    (looks like a deliberate permutation-null control; confirm intent).
    """
    num_categories = len(tags)
    receptors_dict = {}
    for split in splits:
        with h5py.File(path_db, 'r') as db:
            receptors = db[split][...]
        # Keep only receptors observed under at least one tag.
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        indices = np.argwhere(weights > 0.0).flatten()
        receptors = receptors[indices]
        if uniform:
            # Binarize so every observed receptor counts equally per tag.
            for tag in tags:
                receptors[('frequency_' + tag)] = np.sign(receptors[('frequency_' + tag)])
        # Normalize each tag's frequencies into a probability distribution.
        for tag in tags:
            receptors[('frequency_' + tag)] /= np.sum(receptors[('frequency_' + tag)])
        if ('tra_vgene' not in receptors_dict):
            # First split: initialize every column.
            for field in ('tra_vgene', 'tra_cdr3', 'tra_jgene',
                          'trb_vgene', 'trb_cdr3', 'trb_jgene'):
                receptors_dict[field] = np.char.decode(receptors[field])
            # Per-receptor weight: mean frequency across tags.
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = weights
            # Soft labels: each tag's share of the receptor's total frequency.
            for tag in tags:
                receptors_dict[('labels_' + tag)] = (receptors[('frequency_' + tag)] / (num_categories * weights))
            receptors_dict['split'] = [split] * receptors.size
        else:
            # Subsequent splits: append to the existing columns.
            for field in ('tra_vgene', 'tra_cdr3', 'tra_jgene',
                          'trb_vgene', 'trb_cdr3', 'trb_jgene'):
                receptors_dict[field] = np.concatenate([receptors_dict[field], np.char.decode(receptors[field])], axis=0)
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = np.concatenate([receptors_dict['weights'], weights], axis=0)
            for tag in tags:
                receptors_dict[('labels_' + tag)] = np.concatenate([receptors_dict[('labels_' + tag)], (receptors[('frequency_' + tag)] / (num_categories * weights))], axis=0)
            receptors_dict['split'] = np.concatenate([receptors_dict['split'], [split] * receptors.size], axis=0)
    data = pd.DataFrame(receptors_dict)
    data_processed = handcrafted_features(data, tags)
    outputs_list = []
    for split in splits:
        data_split = data_processed[data_processed['split'] == split]
        # (Removed a no-op `data_split.drop('split', axis=1)` whose result was
        # discarded; 'split' is dropped from the feature matrix below anyway.)
        features_split = data_split.drop((['weights', 'split'] + [('labels_' + tag) for tag in tags]), axis=1)
        labels_split = data_split[[('labels_' + tag) for tag in tags]]
        weights_split = data_split['weights']
        xs_split = features_split.to_numpy()
        ys_split = labels_split.to_numpy()
        ws_split = weights_split.to_numpy()
        if permute:
            # Shuffle feature rows only (see docstring note).
            indices = np.arange(xs_split.shape[0])
            np.random.shuffle(indices)
            xs_split = xs_split[indices]
        outputs_list.append(xs_split)
        outputs_list.append(ys_split)
        outputs_list.append(ws_split)
    return outputs_list
def label_float2int(ys, num_classes):
    """Convert soft label rows to hard int64 one-hot rows (argmax per row).

    A single-row input collapses to a 1-D vector because of the squeeze,
    matching the original behavior.
    """
    winners = np.argmax(ys, axis=1)
    identity = np.eye(num_classes)
    onehot = np.squeeze(identity[winners.reshape(-1)])
    return onehot.astype(np.int64)
def crossentropy(labels, logits, weights):
    """Weighted soft-label cross-entropy computed directly from logits.

    Per example: logsumexp(logits) - sum(labels * logits); the weighted sum
    uses `weights` normalized to sum to one.
    """
    normalized_weights = weights / tf.reduce_sum(weights)
    per_example = (- tf.reduce_sum(labels * logits, axis=1)) + tf.reduce_logsumexp(logits, axis=1)
    return tf.reduce_sum(normalized_weights * per_example)
def accuracy(labels, logits, weights):
    """Weighted top-1 accuracy of `logits` against (soft) `labels`."""
    probs = tf.math.softmax(logits)
    norm_weights = weights / tf.reduce_sum(weights)
    hits = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(probs, axis=1)), probs.dtype)
    return tf.reduce_sum(norm_weights * hits)
def find_threshold(labels, logits, weights, target_accuracy):
    """Return the largest predictive-entropy threshold such that the subset
    of examples at or below it is classified with weighted accuracy above
    `target_accuracy`.

    Examples are sorted by entropy (most confident first); the cumulative
    weighted accuracy over growing prefixes determines the cut point.
    """
    probabilities = tf.math.softmax(logits)
    weights = (weights / tf.reduce_sum(weights))
    # Predictive entropy per example: logsumexp(logits) - sum(p * logits).
    entropies = ((- tf.reduce_sum((probabilities * logits), axis=1)) + tf.reduce_logsumexp(logits, axis=1))
    corrects = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(probabilities, axis=1)), probabilities.dtype)
    # Sort ascending by entropy.
    indices_sorted = tf.argsort(entropies, axis=0)
    entropies_sorted = tf.gather(entropies, indices_sorted)
    corrects_sorted = tf.gather(corrects, indices_sorted)
    weights_sorted = tf.gather(weights, indices_sorted)
    # Cumulative weighted accuracy over confident prefixes.
    numerators_sorted = tf.math.cumsum((weights_sorted * corrects_sorted), axis=0)
    denominators_sorted = tf.math.cumsum(weights_sorted, axis=0)
    accuracies_sorted = (numerators_sorted / denominators_sorted)
    # positions = 0..N-1 (fix: this local was previously named `range`,
    # shadowing the builtin).
    positions = (tf.math.cumsum(tf.ones_like(accuracies_sorted, dtype=tf.int64), axis=0) - 1)
    # Largest prefix index whose accuracy still exceeds the target.
    indices_threshold = tf.where((accuracies_sorted > tf.constant(target_accuracy, accuracies_sorted.dtype)), positions, tf.zeros_like(positions))
    index_threshold = tf.reduce_max(indices_threshold)
    entropy_threshold = tf.gather(entropies_sorted, index_threshold)
    return entropy_threshold
def accuracy_with_threshold(labels, logits, weights, threshold):
    """Weighted accuracy restricted to examples whose predictive entropy
    is at or below `threshold`."""
    probs = tf.math.softmax(logits)
    norm_weights = weights / tf.reduce_sum(weights)
    entropies = tf.reduce_logsumexp(logits, axis=1) + (- tf.reduce_sum(probs * logits, axis=1))
    hits = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(probs, axis=1)), probs.dtype)
    keep = tf.where(entropies <= threshold, tf.ones_like(entropies), tf.zeros_like(entropies))
    return tf.math.divide(tf.reduce_sum((norm_weights * keep) * hits), tf.reduce_sum(norm_weights * keep))
def crossentropy_with_threshold(labels, logits, weights, threshold):
    """Weighted cross-entropy restricted to examples whose predictive
    entropy is at or below `threshold`."""
    probs = tf.math.softmax(logits)
    norm_weights = weights / tf.reduce_sum(weights)
    entropies = tf.reduce_logsumexp(logits, axis=1) + (- tf.reduce_sum(probs * logits, axis=1))
    losses = tf.reduce_logsumexp(logits, axis=1) + (- tf.reduce_sum(labels * logits, axis=1))
    keep = tf.where(entropies <= threshold, tf.ones_like(entropies), tf.zeros_like(entropies))
    return tf.math.divide(tf.reduce_sum((norm_weights * keep) * losses), tf.reduce_sum(norm_weights * keep))
def fraction_with_threshold(logits, weights, threshold):
    """Weighted fraction of examples whose predictive entropy is at or
    below `threshold` (i.e. coverage of the confident subset)."""
    probs = tf.math.softmax(logits)
    norm_weights = weights / tf.reduce_sum(weights)
    entropies = tf.reduce_logsumexp(logits, axis=1) + (- tf.reduce_sum(probs * logits, axis=1))
    keep = tf.where(entropies <= threshold, tf.ones_like(entropies), tf.zeros_like(entropies))
    return tf.reduce_sum(norm_weights * keep)
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs, num_steps):
    """Build a paired-chain (TRA/TRB) receptor classifier.

    Each chain contributes four logit streams (CDR3 alignment, CDR3 length,
    V gene, J gene); every stream is standardized by the weighted
    NormalizeInitialization layer before the streams are summed. Outputs
    unnormalized logits (no softmax). The extra `weights` input carries
    per-example weights for the normalization layers.
    """
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    # Scalar per-example weight used by the normalization layers.
    weights = Input(shape=[])
    # --- TRA branch ---
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    # Large negative penalties_feature forbids gaps on the sequence side.
    logits_tra_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_tra_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # --- TRB branch (mirror of TRA) ---
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_trb_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    # Sum the eight standardized streams, then standardize the sum.
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
    return model
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs, num_steps):
    """Build a paired-chain (TRA/TRB) receptor classifier, BatchNorm variant.

    Same eight logit streams as the NormalizeInitialization version, but each
    stream is standardized with BatchNormalization(momentum=0.5) and the
    model outputs softmax probabilities (no per-example weights input).
    """
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    # --- TRA branch ---
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_tra_mask)
    logits_tra_cdr3_norm = BatchNormalization(momentum=0.5)(logits_tra_cdr3)
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = BatchNormalization(momentum=0.5)(logits_tra_length)
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = BatchNormalization(momentum=0.5)(logits_tra_vgene)
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = BatchNormalization(momentum=0.5)(logits_tra_jgene)
    # --- TRB branch (mirror of TRA) ---
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_trb_mask)
    logits_trb_cdr3_norm = BatchNormalization(momentum=0.5)(logits_trb_cdr3)
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = BatchNormalization(momentum=0.5)(logits_trb_length)
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = BatchNormalization(momentum=0.5)(logits_trb_vgene)
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = BatchNormalization(momentum=0.5)(logits_trb_jgene)
    # Sum the eight standardized streams, normalize, then softmax.
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = BatchNormalization(momentum=0.5)(logits)
    probabilities = Activation('softmax')(logits_norm)
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene], outputs=probabilities)
    return model
def balanced_sampling(xs, ys, ws, batch_size):
    """Endlessly yield ((x0..x5), y) minibatches sampled with replacement,
    with per-example probability proportional to `ws`."""
    pool = np.arange(xs[0].shape[0])
    probs = ws / np.sum(ws)
    while True:
        picks = np.random.choice(pool, size=batch_size, p=probs)
        yield (tuple(x[picks] for x in xs[:6]), ys[picks])
def balanced_sampling(xs, ys, ws, batch_size):
    """Infinite generator of weighted random minibatches (features, labels).

    Sampling is with replacement; probabilities are `ws` normalized to 1.
    """
    population = np.arange(xs[0].shape[0])
    normalized = ws / np.sum(ws)
    while True:
        chosen = np.random.choice(population, size=batch_size, p=normalized)
        features = (xs[0][chosen], xs[1][chosen], xs[2][chosen], xs[3][chosen], xs[4][chosen], xs[5][chosen])
        yield (features, ys[chosen])
def load_receptors(path_tsv, min_cdr3_length=8, max_cdr3_length=32):
    """Read an Adaptive-style TSV and return {cdr3: summed frequency}.

    Keeps only rows whose sequenceStatus contains 'In' (in-frame) and whose
    CDR3 length is within [min_cdr3_length, max_cdr3_length]; identical CDR3
    sequences have their frequencies summed.
    """
    receptors = {}
    with open(path_tsv, 'r') as stream:
        reader = csv.DictReader(stream, delimiter='\t')
        for row in reader:
            cdr3 = row['aminoAcid']
            status = row['sequenceStatus']
            # Parse the frequency only for rows that pass the filter, so a
            # malformed count on an excluded row cannot abort the load.
            # (The original also read nucleotide/V/D/J fields it never used.)
            if (('In' in status) and (min_cdr3_length <= len(cdr3) <= max_cdr3_length)):
                quantity = np.float64(row['frequencyCount (%)'])
                receptors[cdr3] = receptors.get(cdr3, np.float64(0.0)) + quantity
    return receptors
def normalize_receptors(receptors):
    """Rescale the quantities in `receptors` in place so they sum to 1.

    Summation runs over sorted values for deterministic float accumulation.
    Returns the same (mutated) dict.
    """
    total = np.float64(0.0)
    for quantity in sorted(receptors.values()):
        total += quantity
    for key in receptors.keys():
        receptors[key] /= total
    return receptors
def insert_receptors(path_db, name, receptors, max_cdr3_length=32):
    """Write `receptors` ({cdr3: frequency}) into HDF5 dataset `name`,
    ordered by descending frequency. Appends to an existing file, else
    creates a new one."""
    dtype = [('cdr3', ('S' + str(max_cdr3_length))), ('frequency', 'f8')]
    table = np.zeros(len(receptors), dtype=dtype)
    ordered = sorted(receptors, key=receptors.get, reverse=True)
    for (i, cdr3) in enumerate(ordered):
        table[i]['cdr3'] = cdr3
        table[i]['frequency'] = receptors[cdr3]
    mode = 'r+' if os.path.isfile(path_db) else 'w'
    with h5py.File(path_db, mode) as db:
        dataset = db.create_dataset(name, (table.size,), dtype)
        dataset[:] = table
def insert_samples(path_db, name, samples):
    """Write per-sample metadata into HDF5 dataset `name` with
    class-balanced weights (each class receives total weight 0.5).

    Note: positives are COUNTED with diagnosis > 0.5, while label/weight
    use the truthiness of diagnosis, mirroring the original behavior.
    """
    dtype = [('sample', 'S32'), ('age', 'f8'), ('label', 'f8'), ('weight', 'f8')]
    table = np.zeros(len(samples), dtype=dtype)
    ordered = sorted(samples.keys())
    num_pos = 0.0
    for key in ordered:
        if samples[key]['diagnosis'] > 0.5:
            num_pos += 1.0
    num_neg = len(samples) - num_pos
    for (i, key) in enumerate(ordered):
        info = samples[key]
        table[i]['sample'] = key
        table[i]['age'] = info['age']
        table[i]['label'] = 1.0 if info['diagnosis'] else 0.0
        table[i]['weight'] = (0.5 / num_pos) if info['diagnosis'] else (0.5 / num_neg)
    mode = 'r+' if os.path.isfile(path_db) else 'w'
    with h5py.File(path_db, mode) as db:
        dataset = db.create_dataset(name, (table.size,), dtype)
        dataset[:] = table
class Abundance(Layer):
    """Map per-instance quantities to log-abundance features, inserting a
    singleton axis so the output has shape (batch, 1). Mask-transparent."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        # Pass the incoming mask through unchanged.
        return mask

    def call(self, inputs, mask=None):
        expanded = K.expand_dims(inputs, axis=1)
        return K.log(expanded)
class Alignment(Layer):
    """Trainable alignment layer: scores each masked input sequence against
    `filters` learned position-weight motifs of length `weight_steps` using
    the external `alignment_score` helper, then adds a per-filter bias.

    penalties_feature / penalties_filter are gap penalties forwarded to
    `alignment_score` (a large negative value effectively forbids gaps on
    that side). With length_normalize=True, scores are divided by
    sqrt(min(sequence length, weight_steps)).
    """

    def __init__(self, filters, weight_steps, penalties_feature=0.0, penalties_filter=0.0, length_normalize=False, kernel_initializer='uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
        # filters: number of motifs (= output logits); weight_steps: motif length.
        self.filters = filters
        self.weight_steps = weight_steps
        self.penalties_feature = penalties_feature
        self.penalties_filter = penalties_filter
        self.length_normalize = length_normalize
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        super(__class__, self).__init__(**kwargs)

    def build(self, input_shape):
        # kernel: [weight_steps, feature_dim, filters] motif weights.
        self.kernel = self.add_weight(name='kernel', shape=[self.weight_steps, int(input_shape[2]), self.filters], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
        self.bias = self.add_weight(name='bias', shape=[self.filters], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True)
        super(__class__, self).build(input_shape)

    def compute_mask(self, inputs, mask=None):
        # Collapse the per-timestep mask to one validity flag per sequence.
        if (mask is None):
            return mask
        return K.any(mask, axis=1)

    def call(self, inputs, mask=None):
        scores = alignment_score(inputs, mask, self.kernel, penalties_feature=self.penalties_feature, penalties_weight=self.penalties_filter)
        if self.length_normalize:
            # Divide by sqrt(min(seq length, motif length)) so longer
            # sequences do not dominate purely by length.
            lengths_feature = K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
            lengths_weight = K.cast(self.weight_steps, inputs.dtype)
            lengths = K.minimum(lengths_feature, lengths_weight)
            scores = (scores / K.sqrt(lengths))
        logits = (scores + self.bias)
        return logits
class BatchExpand(Layer):
    """Broadcast tensor x across the shape of tensor y by multiplying x
    with a ones tensor shaped like y."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        source, reference = inputs
        return source * K.ones_like(reference, dtype=source.dtype)
class FullFlatten(Layer):
    """Collapse the whole input (batch dimension included) into a rank-1
    tensor, discarding any mask."""

    def compute_mask(self, inputs, mask=None):
        return None

    def call(self, inputs, mask=None):
        return tf.reshape(inputs, [(- 1)])
class Length(Layer):
    """Emit the number of valid (unmasked) timesteps per sequence as a
    (batch, 1) tensor."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        # One validity flag per sequence.
        if mask is None:
            return mask
        return K.any(mask, axis=1)

    def call(self, inputs, mask=None):
        valid = K.cast(mask, dtype=inputs.dtype)
        return K.sum(valid, axis=1, keepdims=True)
class NormalizeInitializationByAggregation(Layer):
    """Streaming weighted standardization layer.

    Accumulates weighted first/second moments across batches in
    non-trainable state, but only while the model's `level` input matches
    this layer's `level` tag; inputs are then normalized with the running
    mean/variance. Serves as data-dependent normalization rather than
    classic batch norm.
    """

    def __init__(self, level, epsilon=1e-05, **kwargs):
        # level: integer tag selecting when this layer's statistics update.
        # epsilon: variance floor for numerical stability.
        self.level = level
        self.epsilon = epsilon
        super(__class__, self).__init__(**kwargs)

    def build(self, input_shape):
        (input_shape, _, _) = input_shape
        # Running sums: numerator = sum(w*x), numerator_sq = sum(w*x^2),
        # denominator = sum(w); denominator seeded tiny to avoid div-by-zero.
        self.numerator = self.add_weight(name='mean', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        self.numerator_sq = self.add_weight(name='numerator_sq', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        self.denominator = self.add_weight(name='denominator', shape=[1], initializer=Constant(1e-05), trainable=False)
        super(__class__, self).build(input_shape)

    def compute_mask(self, inputs, mask=None):
        return None

    def call(self, inputs):
        (inputs, weights, level_) = inputs
        level = tf.reshape(tf.cast(self.level, level_.dtype), [1])
        weights_expand = tf.expand_dims(weights, axis=1)
        # Weighted batch statistics for this call.
        numerator_block = tf.reduce_sum((weights_expand * inputs), axis=0)
        numerator_sq_block = tf.reduce_sum((weights_expand * (inputs ** 2)), axis=0)
        denominator_block = tf.reduce_sum(weights_expand, axis=0)
        # Fold this batch into the running sums only when levels match
        # (indicator is 1.0 on match, 0.0 otherwise).
        indicator = tf.cast(tf.equal(level, level_), numerator_block.dtype)
        numerator = K.update_add(self.numerator, (indicator * numerator_block))
        numerator_sq = K.update_add(self.numerator_sq, (indicator * numerator_sq_block))
        denominator = K.update_add(self.denominator, (indicator * denominator_block))
        # Standardize with the accumulated weighted mean/variance.
        mean = (numerator / denominator)
        variance = ((numerator_sq / denominator) - (mean ** 2))
        mean_expand = tf.expand_dims(mean, axis=0)
        variance_expand = tf.expand_dims(variance, axis=0)
        outputs = ((inputs - mean_expand) / tf.sqrt((variance_expand + self.epsilon)))
        return outputs
def load_similarity_matrix(filename):
    """Read a CSV of similarity scores into {column_name: [float, ...]}.

    Empty field names (e.g. the unnamed index column) are skipped. The
    file handle is now closed deterministically via a context manager
    (the original leaked it).
    """
    similarity_matrix = {}
    with open(filename, 'r') as stream:
        reader = csv.DictReader(stream)
        entries = list(reader)
        for k in reader.fieldnames:
            if (len(k) < 1):
                continue  # skip the unnamed index column
            similarity_matrix[k] = [float(entry[k]) for entry in entries]
    return similarity_matrix
def print_matrix(m, cdr3):
    """Pretty-print a 9 x (len(cdr3)+1) alignment score matrix, with the
    cdr3 residues as column headers (first column is the gap column)."""
    width = len(cdr3)
    print((' %11s' % ''), end='')
    for ch in cdr3:
        print((' %11s' % ch), end='')
    print('')
    for r in range(9):
        for c in range(width + 1):
            print((' %11.4f' % m[r][c]), end='')
        print('')
def print_bp(bp, cdr3):
    """Pretty-print a 9 x (len(cdr3)+1) backpointer matrix, with the cdr3
    residues as column headers (cells are 'diag'/'up'/'left'/None)."""
    width = len(cdr3)
    print((' %11s' % ''), end='')
    for ch in cdr3:
        print((' %11s' % ch), end='')
    print('')
    for r in range(9):
        for c in range(width + 1):
            print((' %11s' % bp[r][c]), end='')
        print('')
def print_alignment(bp, cdr3):
    """Trace the backpointer matrix from the bottom-right corner and return
    the aligned theta-row string; '.' marks positions consumed only on the
    cdr3 side. Prints 'ERROR' on an unrecognized backpointer value."""
    trace = []
    row, col = 8, len(cdr3)
    while True:
        move = bp[row][col]
        if move == 'diag':
            trace.append(row)
            row -= 1
            col -= 1
        elif move == 'up':
            trace.append(row)
            row -= 1
        elif move == 'left':
            trace.append('.')
            col -= 1
        else:
            print('ERROR')
        if row <= 0 or col <= 0:
            break
    # Pad out whichever axis did not reach the origin.
    for i in range(row, 0, (- 1)):
        trace.append(i)
    for i in range(col, 0, (- 1)):
        trace.append('.')
    return ''.join(str(c) for c in reversed(trace))
def do_alignment(sm, cdr3): theta_gap = (- 1000) cdr3_gap = 0 max_col = (len(cdr3) + 1) am = [] bp = [] for row in range(0, 9): am.append([0.0 for col in range(0, max_col)]) bp.append([None for col in range(0, max_col)]) score = 0 for row in range(0, 9): am[row][0] = score score += theta_gap score = 0 for col in range(0, max_col): am[0][col] = score score += cdr3_gap for col in range(1, max_col): cdr3_pos = (col - 1) for row in range(1, 9): theta_pos = (row - 1) up = (am[(row - 1)][col] + theta_gap) diag = (am[(row - 1)][(col - 1)] + sm[cdr3[cdr3_pos]][theta_pos]) left = (am[row][(col - 1)] + cdr3_gap) if (up > diag): if (up > left): am[row][col] = up bp[row][col] = 'up' else: am[row][col] = left bp[row][col] = 'left' elif (diag > left): am[row][col] = diag bp[row][col] = 'diag' else: am[row][col] = left bp[row][col] = 'left' return [am, bp]
def do_file_alignment(input, output, sm_tra, sm_trb, tag):
    """Copy a receptor CSV, appending per-row TRA/TRB alignment strings and
    length-normalized alignment scores for similarity model `tag`.

    Fix: both file handles are now closed via context managers (the
    original leaked the reader and writer handles, risking truncated
    output).
    """
    with open(input, 'r') as src, open(output, 'w') as dst:
        reader = csv.DictReader(src)
        fieldnames = reader.fieldnames.copy()
        fieldnames.append(('tra_alignment_' + tag))
        fieldnames.append(('tra_score_' + tag))
        fieldnames.append(('trb_alignment_' + tag))
        fieldnames.append(('trb_score_' + tag))
        writer = csv.DictWriter(dst, fieldnames=fieldnames)
        writer.writeheader()
        # NOTE(review): r indexes row 32 of the score matrix, but the
        # 9-row do_alignment defined above would not support this —
        # confirm which do_alignment variant this function pairs with.
        r = 32
        for row in reader:
            col = len(row['tra_cdr3'])
            tra_align = do_alignment(sm_tra, row['tra_cdr3'])
            row[('tra_alignment_' + tag)] = print_alignment(tra_align[1], row['tra_cdr3'])
            # Normalize by sqrt(length) so scores are comparable across lengths.
            row[('tra_score_' + tag)] = (tra_align[0][r][col] / math.sqrt(float(col)))
            col = len(row['trb_cdr3'])
            trb_align = do_alignment(sm_trb, row['trb_cdr3'])
            row[('trb_alignment_' + tag)] = print_alignment(trb_align[1], row['trb_cdr3'])
            row[('trb_score_' + tag)] = (trb_align[0][r][col] / math.sqrt(float(col)))
            writer.writerow(row)
def test_alignment(sm, cdr3):
    """Debug helper: align `cdr3`, dump the score and backpointer matrices,
    and print the resulting alignment string."""
    am, bp = do_alignment(sm, cdr3)
    print_matrix(am, cdr3)
    print_bp(bp, cdr3)
    print(print_alignment(bp, cdr3))
class BatchExpand(Layer):
    """Tile a tensor to the shape of a template tensor by multiplying it
    with ones_like(template)."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        small, template = inputs
        ones = K.ones_like(template, dtype=small.dtype)
        return small * ones
class GlobalPoolWithMask(Layer):
    """Masked global max pooling over the time axis: padded steps receive a
    -1e16 offset so they can never win the max."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        # Sequence is valid if any timestep is valid.
        return tf.reduce_any(mask, axis=1)

    def call(self, inputs, mask=None):
        valid = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        offset = (- 1e+16) * (1.0 - valid)
        return tf.reduce_max(inputs + offset, axis=1)
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build a multiple-instance repertoire model (Alignment variant with
    sequence-side gap penalty and length normalization).

    Per-instance logit streams (CDR3 alignment, length, log-abundance,
    broadcast age) are each standardized by level-gated aggregation layers,
    summed, aggregated across instances, re-standardized, and flattened.
    The batch_shape=[1] inputs carry one repertoire-level value per batch.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    # level gates which NormalizeInitializationByAggregation layers update.
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    # Broadcast the repertoire-level age to every instance.
    features_age = BatchExpand()([feature_age, features_abundance])
    # Per-instance weight = repertoire weight x clone quantity.
    weights_instance = Multiply()([weight, features_quantity])
    logits_cdr3 = Alignment(num_outputs, filter_size, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    # Sum the standardized streams, aggregate over instances, standardize.
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build a multiple-instance repertoire model (paired-filter variant).

    Uses 2*num_outputs filters with filter-side gap penalty and no length
    normalization, and Aggregate2Instances to reduce filter pairs; logit
    streams are standardized by level-gated aggregation layers as in the
    sibling variants.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    # level gates which NormalizeInitializationByAggregation layers update.
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    features_age = BatchExpand()([feature_age, features_abundance])
    weights_instance = Multiply()([weight, features_quantity])
    # Two filters per output; Aggregate2Instances later pairs them up.
    num_filters = (2 * num_outputs)
    logits_cdr3 = Alignment(num_filters, filter_size, penalties_feature=0.0, penalties_filter=(- 1e+16), length_normalize=False)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_filters)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_filters)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_filters)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate2Instances()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
class BatchExpand(Layer):
    """Expand a tensor to another tensor's shape: multiplies the first
    input by ones shaped like the second input."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        first, second = inputs
        return first * K.ones_like(second, dtype=first.dtype)
class GlobalPoolWithMask(Layer):
    """Global max pooling over axis 1 that ignores masked timesteps by
    adding a large negative penalty to them before the max."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        return tf.reduce_any(mask, axis=1)

    def call(self, inputs, mask=None):
        indicators = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        penalized = inputs + ((- 1e+16) * (1.0 - indicators))
        return tf.reduce_max(penalized, axis=1)
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build a multiple-instance repertoire model (Conv1D variant).

    Replaces the Alignment layer with Conv1D + masked global max pooling;
    the other logit streams (length, log-abundance, broadcast age) and the
    level-gated standardization/aggregation pipeline match the siblings.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    # level gates which NormalizeInitializationByAggregation layers update.
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    features_age = BatchExpand()([feature_age, features_abundance])
    weights_instance = Multiply()([weight, features_quantity])
    # Convolve the raw one-hot CDR3; re-attach the mask trimmed by the
    # valid-convolution shrinkage, then max-pool over valid positions.
    logits_cdr3 = Conv1D(num_outputs, filter_size)(features_cdr3)
    logits_cdr3_mask = MaskCopy(trim_front=(filter_size - 1))([logits_cdr3, features_mask])
    logits_cdr3_pool = GlobalPoolWithMask()(logits_cdr3_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3_pool, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build a multiple-instance repertoire model (Alignment variant with
    filter-side gap penalty, no length normalization).

    Same pipeline as the siblings: four standardized per-instance logit
    streams are summed, aggregated across instances, re-standardized, and
    flattened.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    # level gates which NormalizeInitializationByAggregation layers update.
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    features_age = BatchExpand()([feature_age, features_abundance])
    weights_instance = Multiply()([weight, features_quantity])
    # Gap penalty on the filter side only; raw (un-normalized) scores.
    logits_cdr3 = Alignment(num_outputs, filter_size, penalties_feature=0.0, penalties_filter=(- 1e+16), length_normalize=False)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
def train_one_epoch(model: torch.nn.Module, dl, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, args=None):
    """Train `model` for one epoch over dataloader `dl`.

    Per-iteration LR scheduling, gradient clipping at 5.0, and optional
    wandb logging on the main process. Aborts the process on non-finite
    loss. Returns the epoch-averaged metrics from the metric logger.
    """
    model.train(True)
    optimizer.zero_grad()
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = f'Epoch: [{epoch}]'
    print_freq = 10
    for (batch_idx, batch) in enumerate(metric_logger.log_every(dl, print_freq, header)):
        # LR schedule driven by fractional epoch progress.
        misc.adjust_learning_rate(optimizer, ((batch_idx / len(dl)) + epoch), args)
        x = mem_inputs_to_device(batch, device, args)
        # Dense ddG targets for single and double mutations; moved to the
        # device in place so downstream consumers of `batch` see them too.
        ddg_dense1 = batch['ddg_dense'] = batch['ddg_dense'].to(device, non_blocking=True)
        ddg_dense2 = batch['ddg_dense2'] = batch['ddg_dense2'].to(device, non_blocking=True)
        pred = model(x, batch)
        losses = loss_single_double(pred, ddg_dense1, ddg_dense2, batch, args, True)
        loss = sum(losses.values())
        if (not math.isfinite(loss.item())):
            # Abort on NaN/inf loss rather than poisoning the weights.
            print('Loss is {}, stopping training'.format(loss.item()))
            sys.exit(1)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), 5.0)
        optimizer.step()
        optimizer.zero_grad()
        lr = optimizer.param_groups[0]['lr']
        losses_detach = {f'train_{k}': v.cpu().item() for (k, v) in losses.items()}
        metric_logger.update(lr=lr)
        metric_logger.update(loss=loss.item())
        metric_logger.update(**losses_detach)
        if ((not args.disable_wandb) and misc.is_main_process()):
            wandb.log({'train_loss': loss.item(), 'lr': lr, **losses_detach})
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, dl, device, args):
    """Evaluate ddG predictions over `dl`, collect per-PDB mutation scores
    (list, double, and single mutations), optionally gather them across
    distributed ranks, then compute metrics and write results/metrics CSVs
    on the main process. Returns metrics prefixed with the dataset name.
    """
    model.eval()
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    all_preds = {}
    for batch in metric_logger.log_every(dl, 10, header):
        x = mem_inputs_to_device(batch, device, args)
        # 999 is the sentinel for "no measured ddG" at a position.
        batch['known_mask1'] = known_mask1 = (batch['ddg_dense'] != 999)
        batch['known_mask2'] = known_mask2 = (batch['ddg_dense2'] != 999)
        eval_single = known_mask1.any()
        eval_double = known_mask2.any()
        eval_list = (('mut_info_list' in batch) and (sum([len(x) for x in batch['mut_info_list']]) > 0))
        batch['only_eval_single'] = (eval_single and (not eval_double) and (not eval_list))
        batch['eval_list'] = eval_list
        pred_dict = model(x, batch)
        pr1 = pred_dict['mut1_ddg']
        pr2 = pred_dict.get('mut2_ddg', None)
        for b in range(len(x)):
            pdb_id = batch['pdb_ids'][b]
            seq = batch['seqs'][b]
            (muts, scores) = ([], [])
            # Scores are negated ddG predictions throughout (higher = more
            # stabilizing).
            if eval_list:
                muts.append(np.array(batch['mut_info_list'][b]))
                scores.append((- pred_dict['pr_ddgs_list'][b].detach().cpu().numpy()))
            if eval_double:
                # Keep only positions with both a measurement and a valid
                # double-mutation combination.
                (mutations, valid_mask) = get_dense_double_mut_infos(seq)
                pr_ddgs = pr2[b].flatten()
                keep_inds = known_mask2[b].flatten().cpu().numpy()
                muts.append(mutations[(keep_inds & valid_mask)])
                scores.append((- pr_ddgs[(keep_inds & valid_mask)].detach().cpu().numpy()))
            if eval_single:
                mutations = np.array(get_dense_mut_infos(seq))
                pr_ddgs = pr1[b].flatten()
                keep_inds = known_mask1[b].flatten().cpu().numpy()
                muts.append(mutations[keep_inds])
                scores.append((- pr_ddgs[keep_inds].detach().cpu().numpy()))
            all_preds[pdb_id] = {'mutations': np.concatenate(muts), 'scores': np.concatenate(scores)}
    if args.dist_eval:
        # Collect every rank's predictions onto the main process.
        print('Start gathering predictions')
        torch.cuda.empty_cache()
        all_preds = misc.gather_dict_keys_on_main(all_preds)
        print(f'Finished gathering predictions')
    if (not misc.is_main_process()):
        return {}
    ds_name = dl.dataset.name
    (metrics, metrics_det, metrics_det_pdb, copypaste, merged_df) = eval_ddg(dl.dataset.mutdf, all_preds)
    # Undo the negation so the CSV reports raw predicted ddG.
    merged_df['ddg_pred'] = (- merged_df['scores'])
    print(f'Saving results to {args.output_dir}/results_{ds_name}.csv')
    print(f'Saving metrics to {args.output_dir}/metrics_{ds_name}.csv')
    merged_df.to_csv(f'{args.output_dir}/results_{ds_name}.csv', index=False)
    if (metrics_det is not None):
        metrics_det_pdb.to_csv(f'{args.output_dir}/metrics_{ds_name}.csv', index=False)
        print(metrics_det)
    print(ds_name, copypaste)
    metric_logger.update(**metrics)
    ret = {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
    ret['copypasta'] = copypaste
    # Prefix all metric keys with the dataset name.
    ret = {f'{ds_name}_{k}': v for (k, v) in ret.items()}
    if ((not args.disable_wandb) and misc.is_main_process()):
        wandb.log(ret)
    return ret
def get_args_parser():
    """Build the argparse parser for training/evaluating the mutation model.

    Returns:
        argparse.ArgumentParser covering model architecture, data paths,
        optimization hyperparameters, checkpointing and distributed options.
    """
    parser = argparse.ArgumentParser('Train Sequence Detector')
    parser.add_argument('--seed', default=0, type=int)
    # --- model architecture ---
    parser.add_argument('--aa_expand', default='backbone', help='scratch|backbone')
    parser.add_argument('--single_dec', default='naive', help='naive')
    parser.add_argument('--multi_dec', default='epistasis', help='additive|epistasis')
    parser.add_argument('--head_dim', type=int, default=128)
    parser.add_argument('--backbone', default='esm2_t33_650M_UR50D', help='af|esm2_t33_650M_UR50D')
    parser.add_argument('--finetune_backbone', type=str, default='models/finetuning_ptm_2.pt')
    # FIX: `type=int` was missing, so CLI-supplied values arrived as `str`
    # while the default was the int 0 — any numeric comparison downstream
    # would then behave differently for default vs. CLI values.
    parser.add_argument('--freeze_at', default=0, type=int, help='freeze backbone up to layer X')
    parser.add_argument('--n_msa_seqs', type=int, default=128)
    parser.add_argument('--n_extra_msa_seqs', type=int, default=1024)
    parser.add_argument('--af_extract_feat', type=str, default='both', help='which features to use from AF: both|evo|struct')
    # --- data ---
    parser.add_argument('--data_path', type=str, default='data/cdna_train.csv')
    parser.add_argument('--eval_data_paths', type=str, default='data/cdna2_test.csv,data/ptmul.csv,data/s669.csv', help='comma separated string of data paths to evaluate')
    parser.add_argument('--max_context_length', type=int, default=2000, help='max length of protein sequence')
    parser.add_argument('--num_workers', default=10, type=int)
    # --- optimization ---
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0003)
    parser.add_argument('--min_lr', type=float, default=1e-09)
    parser.add_argument('--weight_decay', type=float, default=0.5)
    parser.add_argument('--warmup_epochs', type=int, default=10)
    # Loss weights for the single/double-mutation heads.
    parser.add_argument('--lambda_single', type=float, default=0.1)
    parser.add_argument('--lambda_double', type=float, default=1.0)
    parser.add_argument('--double_subsample_destabilizing_ratio', type=float, default=8)
    parser.add_argument('--lambda_pos', type=float, default=4)
    # --- run mode / checkpointing ---
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--dist_eval', action='store_true')
    parser.add_argument('--test', action='store_true', help='when testing, please use data_path NOT eval_data_paths')
    parser.add_argument('--finetune', default='', type=str)
    parser.add_argument('--resume', default='', type=str)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--output_dir', type=Path, default='logs/mutate_everything')
    parser.add_argument('--eval_period', type=int, default=10)
    parser.add_argument('--save_period', type=int, default=1000)
    parser.add_argument('--disable_wandb', action='store_true')
    # --- device / distributed ---
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=(- 1), type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def main(args):
    """Entry point: build model/data, then either evaluate or train.

    Behavior is driven by ``args`` (see ``get_args_parser``): with
    ``--eval`` it evaluates every dataset in ``args.eval_data_paths`` and
    exits; otherwise it trains for ``args.epochs`` epochs with periodic
    evaluation/checkpointing, then runs a final evaluation pass.
    """
    misc.init_distributed_mode(args)
    if ((not args.disable_wandb) and misc.is_main_process()):
        run_name = args.output_dir.name
        wandb.init(project='mutate_everything', name=run_name, config=args, dir=args.output_dir)
    print(args)
    device = torch.device(args.device)
    # Offset the seed per rank so distributed workers don't draw identically.
    seed = (args.seed + misc.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    model = MutateEverything(args)
    alphabet = model.backbone.get_alphabet()
    n_params = sum((p.numel() for p in model.parameters()))
    n_params_grad = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print(model)
    print(f'Training {n_params_grad} of {n_params} parameters')
    model.to(args.device)
    # Keep a handle to the bare module for checkpoint save/load under DDP.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    param_groups = misc.param_groups_weight_decay(model, args.weight_decay)
    optimizer = optim.AdamW(param_groups, lr=args.lr, weight_decay=args.weight_decay)
    # Restores weights/optimizer state from --resume/--finetune if given.
    misc.load_model(args, model_without_ddp, optimizer, None)
    # One dataloader per comma-separated eval dataset.
    dls_test = []
    for eval_data_path in args.eval_data_paths.split(','):
        ds_test = SeqDetDatatset(eval_data_path, args, train=False)
        collate_fn = partial(protein_collate_fn, alphabet=alphabet, args=args)
        if (args.distributed and args.dist_eval):
            sampler_test = torch.utils.data.DistributedSampler(ds_test, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        else:
            sampler_test = torch.utils.data.SequentialSampler(ds_test)
        # batch_size=1: evaluation handles one protein at a time.
        dl_test = torch.utils.data.DataLoader(ds_test, sampler=sampler_test, batch_size=1, collate_fn=collate_fn)
        dls_test.append(dl_test)
    if args.eval:
        # Evaluation-only mode: score every test set, log, and exit.
        metrics = {}
        for dl_test in dls_test:
            metrics.update(evaluate(model, dl_test, device, args))
        if misc.is_main_process():
            metrics['copypasta'] = ',,'.join([metrics[f'{dl.dataset.name}_copypasta'] for dl in dls_test])
            print(metrics)
        if ((not args.disable_wandb) and misc.is_main_process()):
            wandb.log({'copypasta': metrics['copypasta']})
            wandb.finish()
        exit()
    ds_train = SeqDetDatatset(args.data_path, args, train=True)
    collate_fn = partial(protein_collate_fn, alphabet=alphabet, args=args)
    if args.distributed:
        sampler_train = torch.utils.data.DistributedSampler(ds_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
        print(('Sampler_train = %s' % str(sampler_train)))
    else:
        sampler_train = torch.utils.data.RandomSampler(ds_train)
    dl_train = torch.utils.data.DataLoader(ds_train, sampler=sampler_train, batch_size=args.batch_size, collate_fn=collate_fn)
    print(f'Start training for {args.epochs} epochs, saving to {args.output_dir}')
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards deterministically per epoch.
            dl_train.sampler.set_epoch(epoch)
        train_one_epoch(model, dl_train, optimizer, device, epoch, args)
        # Periodic eval/checkpoint: fires on the last epoch of each period.
        if ((epoch % args.eval_period) == (args.eval_period - 1)):
            for dl_test in dls_test:
                evaluate(model, dl_test, device, args)
        if ((epoch % args.save_period) == (args.save_period - 1)):
            ckpt_path = misc.save_model(args, epoch, model, model_without_ddp, optimizer, None)
            print(f'Saved checkpoint to {ckpt_path}')
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    # Final evaluation on all test sets after training completes.
    metrics = {}
    for dl_test in dls_test:
        metrics.update(evaluate(model, dl_test, device, args))
    if misc.is_main_process():
        metrics['copypasta'] = ',,'.join([metrics[f'{dl.dataset.name}_copypasta'] for dl in dls_test])
        print(metrics)
    if ((not args.disable_wandb) and misc.is_main_process()):
        wandb.log({'copypasta': metrics['copypasta']})
        wandb.finish()
def eval_ddg(df: pd.DataFrame, preds: dict, max_dets: list = None, max_ddg: float = (- 0.5)):
    """Score ddG predictions against ground truth.

    Args:
        df: DataFrame with pdb_id, mut_info, gt ddg.
        preds: dict of {pdb_id: {'mutations': [], 'scores': []}};
            mutations formatted f'{cur_aa}{seq_pos}{mut_aa}' (indexing from 1).
        max_dets: max numbers of predicted mutations to consider
            (defaults to [30]).
        max_ddg: max ddg value for a mutation to count as stabilizing.
    Returns:
        metrics: dict of all metrics.
        metrics_det: DataFrame of detection metrics.
        metrics_det_pdb: DataFrame of detection metrics per pdb.
        copypaste: comma-joined string of the headline metrics.
        df: merged gt and predictions.
    """
    # FIX: avoid a mutable default argument; None is the sentinel.
    if max_dets is None:
        max_dets = [30]
    df = _preprocess_gt_pr(df, preds)
    # Scores are negated ddG predictions, so negate back for regression metrics.
    metrics = compute_cls_reg_metrics((- df.scores), df.ddg)
    (metrics_det, metrics_det_pdb) = compute_detection_metrics(df, max_dets, max_ddg)
    # Flatten the detection summary into flat '{metric}_{max_ddg}_{max_det}' keys.
    metrics.update({f'{row.variable}_{row.max_ddg}_{row.max_det}': row.value
                    for (_, row) in metrics_det.melt(id_vars=['max_ddg', 'max_det']).iterrows()})
    copypaste = ','.join([f'{metrics[name]:.02f}' for name in COPYPASTE_NAMES_DDG])
    return (metrics, metrics_det, metrics_det_pdb, copypaste, df)
def _preprocess_gt_pr(df, preds): ' Clean the GT, then merge the predictions into the GT dataframe. ' df = df[(~ df.mut_info.isna())] df = df[(~ df.ddg.isna())] df['mut_info'] = df['mut_info'].str.upper() df = df.groupby(['pdb_id', 'mut_info'], as_index=False).median(numeric_only=True) if (('ddg' in df.columns) and (np.mean((df.ddg < 0)) > 0.5)): print('WARNING: more stabilizing than destabilizing, flipping sign') df['ddg'] = ((- 1) * df['ddg']) pdbs_eval = list(preds.keys()) gt_pdb_ids = list(df.pdb_id.unique()) if (len(pdbs_eval) == 0): return {} assert set(pdbs_eval).issubset(set(gt_pdb_ids)), f'preds for {(set(pdbs_eval) - set(gt_pdb_ids))} not in gt' preds_df = [] for (pdb_id, preds_pdb) in preds.items(): preds_pdb_df = pd.DataFrame.from_dict(preds_pdb) preds_pdb_df['pdb_id'] = pdb_id preds_df.append(preds_pdb_df) preds_df = pd.concat(preds_df).rename({'mutations': 'mut_info'}, axis=1) preds_df['mut_info'] = preds_df['mut_info'].str.upper() df = df.merge(preds_df, on=['pdb_id', 'mut_info'], how='left') return df
def compute_detection_metrics(df: pd.DataFrame, max_dets: list = None, max_ddg: float = (- 0.5)):
    """Compute per-protein ranking/detection metrics and their averages.

    For each pdb, predictions are ranked by score and scored with NDCG and
    precision@max_det against the set of truly stabilizing mutations
    (ddg <= max_ddg). Proteins with <= 1 measurement or no stabilizing
    mutation are skipped.

    Args:
        df: merged GT+prediction DataFrame (pdb_id, mut_info, ddg, scores).
        max_dets: detection cutoffs to evaluate (defaults to [30]).
        max_ddg: ddg threshold below which a mutation counts as stabilizing.
    Returns:
        (summary, metrics_pdb): metrics averaged over pdbs (with pdb counts
        merged in), and the raw per-pdb metrics DataFrame.
    """
    # FIX: avoid a mutable default argument; None is the sentinel.
    if max_dets is None:
        max_dets = [30]
    metrics_pdb = []
    for pdb_id in df.pdb_id.unique():
        # Rank this protein's mutations by predicted stabilization.
        df_pdb = df[(df.pdb_id == pdb_id)].sort_values('scores', ascending=False)
        scores = df_pdb.scores.to_numpy()
        ddg = df_pdb.ddg.to_numpy()
        # NDCG relevance: only stabilizing mutations (negative ddg) earn gain.
        nddg = np.maximum((- ddg), 0)
        df_pdb_stbl = df_pdb[(ddg <= max_ddg)]
        muts_sorted = df_pdb.mut_info.to_list()
        # Skip degenerate proteins: nothing to rank or nothing to detect.
        if ((len(df_pdb) <= 1) or (len(df_pdb_stbl) == 0)):
            continue
        for max_det in sorted(max_dets):
            metrics_ = {'pdb_id': pdb_id, 'max_det': max_det, 'max_ddg': max_ddg,
                        'n_tot_muts': len(df_pdb),
                        'ndcg': ndcg_score(nddg[None], scores[None], k=max_det),
                        **compute_precision(df_pdb_stbl, muts_sorted[:max_det])}
            metrics_pdb.append(metrics_)
    metrics_pdb = pd.DataFrame(metrics_pdb)
    assert (len(metrics_pdb) > 0), 'no pdbs evaluated'
    # Average per (threshold, cutoff) cell and attach the pdb count.
    summary = metrics_pdb.groupby(['max_ddg', 'max_det'], as_index=False).mean(numeric_only=True)
    counts = metrics_pdb.groupby(['max_ddg', 'max_det'], as_index=False).pdb_id.count()
    summary = pd.merge(counts, summary, on=['max_ddg', 'max_det'])
    return (summary, metrics_pdb)
def compute_precision(gt_pdb, pr_muts_sorted):
    """Precision of the top predicted mutations for one protein.

    Args:
        gt_pdb: DataFrame with pdb_id, mut_info, gt ddg, pre-filtered to
            ONLY stabilizing rows (ddg < threshold).
        pr_muts_sorted: list of predicted mutations, sorted by score and
            already truncated to max_det.
    Returns:
        dict with 'is_tp' (bool array, one entry per prediction) and
        'prec' (mean of is_tp).
    """
    assert (len(gt_pdb.pdb_id.unique()) == 1), f'more than 1 pdb {gt_pdb.pdb_id.unique()}'
    metrics = {}
    # Build the GT set once: O(1) membership instead of an O(n) numpy-array
    # scan per predicted mutation.
    gt_mut = set(gt_pdb.mut_info.str.upper())
    is_tp = np.array([(mut in gt_mut) for mut in pr_muts_sorted])
    metrics['is_tp'] = is_tp
    metrics['prec'] = is_tp.mean()
    return metrics