import gradio as gr
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
class ModelComparator:
    """Compare an extractive QA model's answer against a GPT-2 generation.

    Loads three models once at construction time:
    - a DistilBERT extractive question-answering pipeline,
    - a GPT-2 text-generation pipeline (capped at 20 new tokens so
      generation stays responsive on a small/CPU Space),
    - a MiniLM sentence-embedding model used to score the semantic
      similarity between the two answers.
    """

    def __init__(self):
        self.qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
        # Small max_new_tokens keeps generation fast on CPU-only Spaces.
        self.text_gen_pipeline = pipeline("text-generation", model="gpt2", max_new_tokens=20)
        self.sim_model = SentenceTransformer("all-MiniLM-L6-v2")

    def get_qa_answer(self, question, context=None):
        """Return the extractive QA answer for `question` given `context`.

        An extractive model cannot answer without a context, so a
        placeholder message is returned instead of raising.
        """
        if not context:
            return "No context provided for QA model."
        try:
            result = self.qa_pipeline(question=question, context=context)
            return result['answer']
        except Exception as e:
            # Surface pipeline failures as text so the UI never crashes.
            return f"Error in QA pipeline: {e}"

    def get_text_gen_answer(self, prompt):
        """Generate a free-form continuation of `prompt` with GPT-2.

        GPT-2 echoes the prompt at the start of `generated_text`, so the
        prompt prefix is stripped; if stripping leaves nothing, the full
        generated text is returned instead.
        """
        try:
            generated = self.text_gen_pipeline(prompt)[0]['generated_text']
            answer = generated[len(prompt):].strip()
            return answer if answer else generated.strip()
        except Exception as e:
            return f"Error in text generation pipeline: {e}"

    def compare_answers(self, answer1, answer2):
        """Return the cosine similarity of the two answers, rounded to 3 decimals."""
        emb1 = self.sim_model.encode(answer1, convert_to_tensor=True)
        emb2 = self.sim_model.encode(answer2, convert_to_tensor=True)
        similarity = util.cos_sim(emb1, emb2).item()
        return round(similarity, 3)

    def respond(self, question, context):
        """Answer `question` with both models and report their similarity.

        Returns a single formatted string for display in the Gradio output
        textbox.
        """
        # Guard: an empty question makes both pipelines error out (or
        # produce meaningless output), so fail early with a clear message.
        if not question or not question.strip():
            return "Please enter a question."
        qa_answer = self.get_qa_answer(question, context)
        gen_answer = self.get_text_gen_answer(question)
        similarity = self.compare_answers(qa_answer, gen_answer)
        return (f"Model QA answer:\n{qa_answer}\n\n"
                f"Model GPT-2 generated answer:\n{gen_answer}\n\n"
                f"Semantic similarity score: {similarity}")
# Single shared comparator; the three models load once at startup.
model_comparator = ModelComparator()

# Minimal Gradio UI: two text inputs, one text output, one trigger button.
with gr.Blocks() as demo:
    gr.Markdown("## Comparador rápido para Hugging Face Spaces")

    question_box = gr.Textbox(label="Pergunta")
    context_box = gr.Textbox(label="Contexto para o modelo de QA (opcional)", lines=3)
    result_box = gr.Textbox(label="Respostas e Similaridade", lines=15)
    compare_button = gr.Button("Comparar")

    # Clicking the button runs both models and fills the result textbox.
    compare_button.click(
        fn=model_comparator.respond,
        inputs=[question_box, context_box],
        outputs=result_box,
    )

if __name__ == "__main__":
    demo.launch()