Spaces:
Sleeping
Sleeping
TEST_8
#9
by ThieLin - opened
app.py
CHANGED
|
@@ -4,16 +4,12 @@ from sentence_transformers import SentenceTransformer, util
|
|
| 4 |
|
| 5 |
class ModelComparator:
|
| 6 |
def __init__(self):
    """Load the three models used for answer comparison."""
    # Extractive QA model — small DistilBERT fine-tuned on SQuAD (fast and light).
    self.qa_pipeline = pipeline(
        "question-answering",
        model="distilbert-base-uncased-distilled-squad",
    )
    # Generative model; output capped at 50 freshly generated tokens.
    self.text_gen_pipeline = pipeline(
        "text-generation",
        model="gpt2",
        max_new_tokens=50,
    )
    # Sentence-embedding model used to score semantic similarity.
    self.sim_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 13 |
|
| 14 |
def get_qa_answer(self, question, context=None):
|
| 15 |
-
|
| 16 |
-
if context is None:
|
| 17 |
return "No context provided for QA model."
|
| 18 |
try:
|
| 19 |
result = self.qa_pipeline(question=question, context=context)
|
|
@@ -24,7 +20,6 @@ class ModelComparator:
|
|
| 24 |
def get_text_gen_answer(self, prompt):
|
| 25 |
try:
|
| 26 |
generated = self.text_gen_pipeline(prompt)[0]['generated_text']
|
| 27 |
-
# O GPT2 gera o texto incluindo o prompt, vamos remover o prompt para deixar só resposta
|
| 28 |
answer = generated[len(prompt):].strip()
|
| 29 |
return answer if answer else generated.strip()
|
| 30 |
except Exception as e:
|
|
@@ -45,14 +40,12 @@ class ModelComparator:
|
|
| 45 |
f"Model GPT-2 generated answer:\n{gen_answer}\n\n"
|
| 46 |
f"Semantic similarity score: {similarity}")
|
| 47 |
|
| 48 |
-
# Interface Gradio
|
| 49 |
-
|
| 50 |
model_comparator = ModelComparator()
|
| 51 |
|
| 52 |
with gr.Blocks() as demo:
|
| 53 |
-
gr.Markdown("## Comparador
|
| 54 |
question_input = gr.Textbox(label="Pergunta")
|
| 55 |
-
context_input = gr.Textbox(label="Contexto para o modelo de QA (opcional)", lines=
|
| 56 |
output = gr.Textbox(label="Respostas e Similaridade", lines=15)
|
| 57 |
btn = gr.Button("Comparar")
|
| 58 |
|
|
|
|
| 4 |
|
| 5 |
class ModelComparator:
|
def __init__(self):
    """Load the three models used for answer comparison."""
    # Extractive QA model — small DistilBERT fine-tuned on SQuAD.
    self.qa_pipeline = pipeline(
        "question-answering",
        model="distilbert-base-uncased-distilled-squad",
    )
    # Generative model; generation kept short (20 new tokens) for speed.
    self.text_gen_pipeline = pipeline(
        "text-generation",
        model="gpt2",
        max_new_tokens=20,
    )
    # Sentence-embedding model used to score semantic similarity.
    self.sim_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 10 |
|
| 11 |
def get_qa_answer(self, question, context=None):
|
| 12 |
+
if not context:
|
|
|
|
| 13 |
return "No context provided for QA model."
|
| 14 |
try:
|
| 15 |
result = self.qa_pipeline(question=question, context=context)
|
|
|
|
| 20 |
def get_text_gen_answer(self, prompt):
|
| 21 |
try:
|
| 22 |
generated = self.text_gen_pipeline(prompt)[0]['generated_text']
|
|
|
|
| 23 |
answer = generated[len(prompt):].strip()
|
| 24 |
return answer if answer else generated.strip()
|
| 25 |
except Exception as e:
|
|
|
|
| 40 |
f"Model GPT-2 generated answer:\n{gen_answer}\n\n"
|
| 41 |
f"Semantic similarity score: {similarity}")
|
| 42 |
|
|
|
|
|
|
|
| 43 |
model_comparator = ModelComparator()
|
| 44 |
|
| 45 |
with gr.Blocks() as demo:
|
| 46 |
+
gr.Markdown("## Comparador rápido para Hugging Face Spaces")
|
| 47 |
question_input = gr.Textbox(label="Pergunta")
|
| 48 |
+
context_input = gr.Textbox(label="Contexto para o modelo de QA (opcional)", lines=3)
|
| 49 |
output = gr.Textbox(label="Respostas e Similaridade", lines=15)
|
| 50 |
btn = gr.Button("Comparar")
|
| 51 |
|