Spaces:
Sleeping
Sleeping
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Cross-encoder fine-tuned on MS MARCO passage ranking: given a
# (query, passage) pair it emits a single relevance logit.
MODEL_NAME = "cross-encoder/ms-marco-MiniLM-L6-v2"

# Load once at module import; downloads weights on first run.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Inference only: disable dropout / batch-norm training behavior.
model.eval()

# Backward-compatible alias — the original script exposed `model_name`.
model_name = MODEL_NAME
def get_similarity(question: str, answer: str) -> float:
    """Return the cross-encoder relevance score of *answer* for *question*.

    The pair is tokenized jointly (the cross-encoder attends over both
    texts at once) and the model's single output logit is returned.
    Higher scores mean the answer is more relevant; the value is an
    unbounded raw logit, not a probability.

    Args:
        question: The query text.
        answer: The candidate answer/passage text.

    Returns:
        The raw relevance logit as a Python float.
    """
    features = tokenizer(
        question,
        answer,
        padding=True,
        truncation=True,  # clip pairs longer than the model's max length
        return_tensors="pt",
    )
    # No gradients needed for inference; saves memory and time.
    with torch.no_grad():
        logits = model(**features).logits
    # Single pair in, single logit out: [1, 1] tensor -> Python float.
    return logits[0][0].item()
# Build the Gradio UI: two free-text inputs, one numeric relevance score.
iface = gr.Interface(
    fn=get_similarity,
    inputs=[
        gr.Textbox(label="Question"),
        gr.Textbox(label="Answer"),
    ],
    outputs=gr.Number(label="Similarity Score"),
    title="Cross-Encoder QA Relevance",
)

# Guard the server start so importing this module doesn't launch the app.
if __name__ == "__main__":
    iface.launch()