text
stringlengths 1
93.6k
|
|---|
# Select template type
|
template_type = prompt_until_valid('Template type (HTTP/Network/File/DNS): ', ['HTTP', 'Network', 'File', 'DNS'])
|
try:
|
if template_type == 'HTTP':
|
template['requests'] = add_http_requests(unsafe)
|
elif template_type == 'Network':
|
template['network'] = add_network_requests(unsafe)
|
elif template_type == 'File':
|
template['file'] = add_file_requests(unsafe)
|
elif template_type == 'DNS':
|
template['dns'] = add_dns_requests(unsafe)
|
except Exception as e:
|
print(f"An error occurred: {e}")
|
# Output template
|
print(yaml.dump(template))
|
# Script entry point: build the template interactively and dump it only when
# executed directly (not on import). Reconstructed with the indentation the
# extraction stripped — as scraped, the guard body was at column 0, which is
# a SyntaxError.
if __name__ == "__main__":
    main()
|
# <FILESEP>
|
#!/usr/bin/env python
|
import logging
import math
import sys
import time
from collections import deque

import click as ck
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy import sparse
from scipy.spatial import distance
from sklearn.metrics import auc, classification_report, matthews_corrcoef, roc_curve
from sklearn.metrics.pairwise import cosine_similarity

from utils import FUNC_DICT, Ontology, NAMESPACES
|
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
|
@ck.command()
|
@ck.option(
|
'--train-data-file', '-trdf', default='data/train_data_train.pkl',
|
help='Data file with training features')
|
@ck.option(
|
'--valid-data-file', '-trdf', default='data/train_data_valid.pkl',
|
help='Data file with training features')
|
@ck.option(
|
'--terms-file', '-tf', default='data/terms.pkl',
|
help='Data file with sequences and complete set of annotations')
|
@ck.option(
|
'--diamond-scores-file', '-dsf', default='data/valid_diamond.res',
|
help='Diamond output')
|
@ck.option(
|
'--ont', '-o', default='mf',
|
help='GO subontology (bp, mf, cc)')
|
def main(train_data_file, valid_data_file, terms_file, diamond_scores_file, ont):
|
go_rels = Ontology('data/go.obo', with_rels=True)
|
terms_df = pd.read_pickle(terms_file)
|
terms = terms_df['terms'].values.flatten()
|
terms_dict = {v: i for i, v in enumerate(terms)}
|
train_df = pd.read_pickle(train_data_file)
|
valid_df = pd.read_pickle(valid_data_file)
|
annotations = train_df['annotations'].values
|
annotations = list(map(lambda x: set(x), annotations))
|
valid_annotations = valid_df['annotations'].values
|
valid_annotations = list(map(lambda x: set(x), valid_annotations))
|
go_rels.calculate_ic(annotations + valid_annotations)
|
# Print IC values of terms
|
ics = {}
|
for term in terms:
|
ics[term] = go_rels.get_ic(term)
|
prot_index = {}
|
for i, row in enumerate(train_df.itertuples()):
|
prot_index[row.proteins] = i
|
# BLAST Similarity (Diamond)
|
diamond_scores = {}
|
with open(diamond_scores_file) as f:
|
for line in f:
|
it = line.strip().split()
|
if it[0] not in diamond_scores:
|
diamond_scores[it[0]] = {}
|
diamond_scores[it[0]][it[1]] = float(it[2])
|
blast_preds = []
|
for i, row in enumerate(valid_df.itertuples()):
|
annots = {}
|
prot_id = row.proteins
|
# BlastKNN
|
if prot_id in diamond_scores:
|
sim_prots = diamond_scores[prot_id]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.