Spaces:
Runtime error
Runtime error
Commit
·
189514b
1
Parent(s):
2dd0724
Revert model in generator
Browse files- generator.py +25 -25
generator.py
CHANGED
|
@@ -36,8 +36,8 @@ import streamlit as st
|
|
| 36 |
def load_model():
|
| 37 |
hfm = pickle.load(open('hfmodel.sav','rb'))
|
| 38 |
hft = T5TokenizerFast.from_pretrained("t5-base")
|
| 39 |
-
model = pickle.load(open('
|
| 40 |
-
tok = et.from_pretrained("
|
| 41 |
# return hfm, hft,tok, model
|
| 42 |
return hfm, hft,tok, model
|
| 43 |
|
|
@@ -67,29 +67,29 @@ def run_model(input_string, **generator_args):
|
|
| 67 |
# al_tokenizer = pickle.load(open('models/al_tokenizer.sav', 'rb'))
|
| 68 |
def QA(question, context):
|
| 69 |
# model_name="deepset/electra-base-squad2"
|
| 70 |
-
nlp = pipeline("question-answering",model=model,tokenizer = tok)
|
| 71 |
-
format = {
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
}
|
| 75 |
-
res = nlp(format)
|
| 76 |
-
output = f"{question}\n{string.capwords(res['answer'])}\n"
|
| 77 |
-
return output
|
| 78 |
-
|
| 79 |
-
#
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
#
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
#
|
| 88 |
-
#
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
# QA("What was the first C program","The first prgram written in C was Hello World")
|
| 94 |
|
| 95 |
def gen_question(inputs):
|
|
|
|
| 36 |
def load_model():
    """Load the generation and extractive-QA models plus their tokenizers.

    Returns:
        tuple: ``(hfm, hft, tok, model)`` — the pickled HF generation
        model, its T5 tokenizer, the QA tokenizer, and the pickled QA
        model, in that order (callers rely on this order).
    """
    # NOTE(review): pickle.load is only safe on trusted, locally produced
    # artifacts — never swap these .sav files for downloaded ones.
    # Use context managers so the file handles are closed promptly
    # (the originals leaked them via bare open()).
    with open('hfmodel.sav', 'rb') as f:
        hfm = pickle.load(f)
    hft = T5TokenizerFast.from_pretrained("t5-base")
    with open('model.sav', 'rb') as f:
        model = pickle.load(f)
    # Fixed: the repo id previously had a trailing space
    # ("..._512 "), which makes the hub repo-id lookup fail.
    tok = et.from_pretrained("ahotrod/albert_xxlargev1_squad2_512")
    # return hfm, hft,tok, model
    return hfm, hft, tok, model
|
| 43 |
|
|
|
|
| 67 |
# al_tokenizer = pickle.load(open('models/al_tokenizer.sav', 'rb'))
|
| 68 |
def QA(question, context):
    """Answer *question* extractively from *context*.

    Runs the module-level QA ``model`` over the tokenized
    (question, context) pair, picks the most probable answer span from
    the start/end logits, and decodes it back to text.

    NOTE(review): this reads module-level ``tokenizer`` and ``model``
    globals, but load_model() returns the tokenizer as ``tok`` —
    confirm the caller binds ``tokenizer`` before QA() is used.

    Args:
        question: Natural-language question string.
        context: Passage expected to contain the answer.

    Returns:
        str: ``"Q. <question> \n Ans. <answer>"`` with the answer
        capitalized word-by-word via string.capwords.
    """
    # model_name="deepset/electra-base-squad2"
    inputs = tokenizer(question, context, return_tensors="pt")
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        output = model(**inputs)
    start_score = output.start_logits
    end_score = output.end_logits
    # Most probable start/end token positions of the answer span.
    start = torch.argmax(start_score)
    end = torch.argmax(end_score)
    # Convert the answer-span tokens back to a string
    # (end is inclusive, hence the +1).
    predict_answer_tokens = inputs.input_ids[0, start : end + 1]
    output = tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
    output = string.capwords(output)
    return f"Q. {question} \n Ans. {output}"
|
| 93 |
# QA("What was the first C program","The first prgram written in C was Hello World")
|
| 94 |
|
| 95 |
def gen_question(inputs):
|