text
stringlengths 1
93.6k
|
|---|
# BLAST-kNN transfer: turn the GO annotations of DIAMOND-similar proteins
# into a score per GO term, weighted by each hit's similarity score.
# NOTE(review): this fragment runs inside a per-protein loop whose header is
# not visible here; `sim_prots`, `annotations`, `prot_index`, `annots` and
# `blast_preds` come from that enclosing scope — confirm against the caller.
allgos = set()
total_score = 0.0
for p_id, score in sim_prots.items():
    # Union of every GO term annotated to the similar protein p_id.
    allgos |= annotations[prot_index[p_id]]
    total_score += score
# Deterministic term order so `sim[j]` lines up with `allgos[j]`.
allgos = sorted(allgos)  # sorted() already returns a list
sim = np.zeros(len(allgos), dtype=np.float32)
for j, go_id in enumerate(allgos):
    # Sum the scores of the hits annotated with go_id, then normalize by
    # the total score over all hits, giving a value in [0, 1].
    s = 0.0
    for p_id, score in sim_prots.items():
        if go_id in annotations[prot_index[p_id]]:
            s += score
    sim[j] = s / total_score
ind = np.argsort(-sim)  # NOTE(review): unused below — dead code, kept for safety
for go_id, score in zip(allgos, sim):
    annots[go_id] = score
blast_preds.append(annots)
|
# DeepGOPlus: grid-search the blend weight `alpha` between the BLAST-kNN
# scores (`blast_preds`) and the neural-network scores (`row.preds`),
# evaluating Fmax/Smin over a sweep of decision thresholds.
go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
# Drop the namespace root term itself — it is trivially true for everything.
go_set.remove(FUNC_DICT[ont])
labels = valid_df['annotations'].values
# Restrict ground-truth annotations to terms of the evaluated namespace.
labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), labels))
print(len(go_set))
best_fmax = 0.0
best_alpha = 0.0
for alpha in range(40, 70):
    # Reuse the loop variable as the actual blend weight in [0.40, 0.69].
    alpha /= 100.0
    deep_preds = []
    for i, row in enumerate(valid_df.itertuples()):
        # Blend: alpha * blast_score + (1 - alpha) * model_score per term.
        annots_dict = blast_preds[i].copy()
        for go_id in annots_dict:
            annots_dict[go_id] *= alpha
        for j, score in enumerate(row.preds):
            go_id = terms[j]
            score *= 1 - alpha
            if go_id in annots_dict:
                annots_dict[go_id] += score
            else:
                annots_dict[go_id] = score
        deep_preds.append(annots_dict)
    fmax = 0.0
    tmax = 0.0
    precisions = []
    recalls = []
    smin = 1000000.0
    rus = []
    mis = []
    # Threshold sweep in [0.10, 0.29].
    for t in range(10, 30):
        threshold = t / 100.0
        preds = []
        for i, row in enumerate(valid_df.itertuples()):
            annots = set()
            for go_id, score in deep_preds[i].items():
                if score >= threshold:
                    annots.add(go_id)
            # Propagate each predicted term to its ancestors so predictions
            # are consistent with the GO hierarchy.
            new_annots = set()
            for go_id in annots:
                new_annots |= go_rels.get_anchestors(go_id)
            preds.append(new_annots)
        # Filter classes
        preds = list(map(lambda x: set(filter(lambda y: y in go_set, x)), preds))
        fscore, prec, rec, s, ru, mi, fps, fns = evaluate_annotations(go_rels, labels, preds)
        # NOTE(review): avg_fp/avg_ic are computed but never used or printed
        # in the visible code — kept in case downstream code reads them.
        avg_fp = sum(map(lambda x: len(x), fps)) / len(fps)
        avg_ic = sum(map(lambda x: sum(map(lambda go_id: go_rels.get_ic(go_id), x)), fps)) / len(fps)
        print(f'Fscore: {fscore}, Precision: {prec}, Recall: {rec} S: {s}, RU: {ru}, MI: {mi} threshold: {threshold}')
        if fmax < fscore:
            fmax = fscore
            tmax = threshold
        if smin > s:
            smin = s
    if best_fmax < fmax:
        best_fmax = fmax
        best_alpha = alpha
    print(f'Alpha: {alpha} Fmax: {fmax:0.3f}, Smin: {smin:0.3f}, threshold: {tmax}')
print(f'{best_alpha} {best_fmax}')
|
def compute_roc(labels, preds):
    """Return the micro-averaged ROC AUC over all labels.

    Both inputs are flattened before scoring, so the curve is computed over
    every (sample, class) pair at once — NOT one curve per class (the original
    comment claimed per-class, which the flatten() calls contradict).
    Assumes `labels` and `preds` are numpy arrays of the same shape — confirm
    against callers.
    """
    fpr, tpr, _ = roc_curve(labels.flatten(), preds.flatten())
    roc_auc = auc(fpr, tpr)
    return roc_auc
|
def compute_mcc(labels, preds):
    """Return the Matthews correlation coefficient over flattened inputs.

    The original comment ("Compute ROC curve") was a copy-paste error from
    compute_roc; this computes MCC, not ROC.
    Assumes `labels` and `preds` are numpy arrays of the same shape holding
    binarized class indicators — confirm against callers.
    """
    mcc = matthews_corrcoef(labels.flatten(), preds.flatten())
    return mcc
|
def evaluate_annotations(go, real_annots, pred_annots):
|
total = 0
|
p = 0.0
|
r = 0.0
|
p_total= 0
|
ru = 0.0
|
mi = 0.0
|
fps = []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.