# Hugging Face Space page header (scraped site chrome), preserved as a comment
# so the file remains valid Python:
#   Workspace / app.py — ThieLin's picture — TEST_6 — 7b31077 verified
#   raw · history · blame · 1.92 kB
import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline
# Models: Zephyr-7B is queried remotely via the HF Inference API;
# GPT-2 runs locally through a transformers pipeline.
chat_model_zephyr = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
chat_model_gpt2 = pipeline("text-generation", model="gpt2", max_new_tokens=100)  # caps the continuation at 100 new tokens
# Sentence-embedding model used to score semantic similarity between answers.
similarity_model = SentenceTransformer("all-MiniLM-L6-v2")
def get_zephyr_response(question):
    """Send *question* to the remote Zephyr-7B chat endpoint and return its reply text."""
    chat_history = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": question},
    ]
    result = chat_model_zephyr.chat_completion(
        chat_history,
        max_tokens=256,
        temperature=0.7,
        top_p=0.95,
    )
    # The API returns an OpenAI-style completion object; take the first choice.
    reply = result.choices[0].message.content
    return reply.strip()
def get_gpt2_response(question):
    """Generate a GPT-2 continuation for *question* and return only the new text.

    The ``text-generation`` pipeline returns the prompt concatenated with the
    generated continuation. The echoed prompt is stripped so the returned
    answer — later compared against Zephyr's answer — contains only GPT-2's
    own output instead of trivially repeating the question (which would
    inflate the semantic-similarity score).
    """
    generated = chat_model_gpt2(question)[0]["generated_text"]
    # Remove the echoed prompt prefix, if present, before cleaning whitespace.
    if generated.startswith(question):
        generated = generated[len(question):]
    return generated.strip()
def compare_answers(answer1, answer2):
    """Return the cosine similarity of the two answers' embeddings, rounded to 3 decimals."""
    vec_a = similarity_model.encode(answer1, convert_to_tensor=True)
    vec_b = similarity_model.encode(answer2, convert_to_tensor=True)
    return round(util.cos_sim(vec_a, vec_b).item(), 3)
def respond(question):
    """Query both models with *question* and format their answers plus a similarity score."""
    zephyr_answer = get_zephyr_response(question)
    gpt2_answer = get_gpt2_response(question)
    score = compare_answers(zephyr_answer, gpt2_answer)
    # Assemble the three report sections separated by blank lines.
    sections = [
        f"🧠 Zephyr-7b:\n{zephyr_answer}",
        f"🤖 GPT-2:\n{gpt2_answer}",
        f"🔍 Similaridade Semântica: **{score}**",
    ]
    return "\n\n".join(sections)
# --- Gradio UI: one question in, both answers + similarity out ---
with gr.Blocks() as demo:
    gr.Markdown(
        "# 🤖 Comparador de Respostas (sem contexto)\nDigite uma pergunta e veja as respostas de dois modelos."
    )
    question_box = gr.Textbox(label="Pergunta")
    compare_btn = gr.Button("Comparar Respostas")
    results_box = gr.Textbox(label="Respostas e Similaridade", lines=15)
    # Clicking the button runs respond() on the question and fills the results box.
    compare_btn.click(respond, inputs=question_box, outputs=results_box)

if __name__ == "__main__":
    demo.launch()