|
|
import multiprocessing as mp
from queue import Empty
from shutil import copyfile

import numpy as np

import TestPool_Unit
|
|
|
|
|
def Evaluate(result_arr):
    """Print micro-averaged recall and precision over a batch of results.

    Parameters
    ----------
    result_arr : list[tuple]
        One entry per processed file, each a 4-tuple
        ``(word_match, lemma_match, n_dcsWords, n_output_nodes)``:
        matched word count, matched lemma count, ground-truth word count,
        and the number of nodes the model emitted.

    Returns
    -------
    None
        Results are printed to stdout only.

    Notes
    -----
    Entries with ``n_dcsWords == 0`` or ``n_output_nodes == 0`` will raise
    ``ZeroDivisionError`` — callers are expected to filter such entries.
    """
    print('Files Processed: ', len(result_arr))

    # Guard: averaging an empty batch would print nan (with a numpy
    # RuntimeWarning), so bail out early instead.
    if not result_arr:
        return

    recalls = []
    recalls_of_word = []
    precisions = []
    precisions_of_words = []

    for (word_match, lemma_match, n_dcsWords, n_output_nodes) in result_arr:
        # Recall: fraction of ground-truth items recovered.
        recalls.append(lemma_match / n_dcsWords)
        recalls_of_word.append(word_match / n_dcsWords)

        # Precision: fraction of emitted nodes that are correct.
        precisions.append(lemma_match / n_output_nodes)
        precisions_of_words.append(word_match / n_output_nodes)

    # np.mean accepts plain Python lists; no np.array wrapper needed.
    print('Avg. Micro Recall of Lemmas: {}'.format(np.mean(recalls)))
    print('Avg. Micro Recall of Words: {}'.format(np.mean(recalls_of_word)))
    print('Avg. Micro Precision of Lemmas: {}'.format(np.mean(precisions)))
    print('Avg. Micro Precision of Words: {}'.format(np.mean(precisions_of_words)))
|
|
|
|
|
# Trained model checkpoint to evaluate.
modelFile = 'outputs/train_nnet_t764815831413.p'

# Work on a backup copy so the evaluation run never touches the
# original checkpoint file.
backup_path = modelFile + '.bk'
copyfile(modelFile, backup_path)
modelFile = backup_path
|
|
|
|
|
|
|
|
# Shared queue the workers push per-file result tuples onto.
queue = mp.Queue()
result_arr = []

# Fan the workload out across worker processes; each worker gets a
# virtual pid (vpid) and processes up to 700 files.
proc_count = 10
procs = [
    mp.Process(target=TestPool_Unit.pooled_Test,
               args=(modelFile, vpid, queue, 700))
    for vpid in range(proc_count)
]

for p in procs:
    p.start()

# Drain the queue BEFORE joining. A child process does not exit until
# everything it put on the queue has been flushed to the pipe, so
# join()-then-read can deadlock once the pipe buffer fills (see the
# multiprocessing docs: "Joining processes that use queues").
while any(p.is_alive() for p in procs):
    try:
        result_arr.append(queue.get(timeout=1))
    except Empty:
        pass

for p in procs:
    p.join()

# Sweep up anything that arrived between the last get() and the joins.
while not queue.empty():
    result_arr.append(queue.get())

Evaluate(result_arr)
|
|
|