# NOTE(review): the lines "Spaces: / Build error / Build error" were scraper
# residue from the Hugging Face Spaces build page, not part of the app code.
# Third-party dependencies: Gradio UI, PyTorch, NumPy, Hugging Face Transformers.
import gradio as gr
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification
# Load ONLY the model, NOT the tokenizer: this Space receives precomputed
# embeddings from the caller, so no server-side tokenization is needed.
model = AutoModelForSequenceClassification.from_pretrained(
    "Kevintu/Engessay_grading_ML")
def process_embeddings(embeddings_array):
    """Grade an essay from precomputed token embeddings.

    Args:
        embeddings_array: Nested lists of floats (deserialized from JSON),
            shaped (seq_len, hidden) or (batch, seq_len, hidden). Assumed to
            match the model's hidden size — TODO confirm with the caller.

    Returns:
        Dict mapping the six rubric item names to score strings, where each
        score is rounded to the nearest 0.5 and formatted with one decimal.
    """
    # JSON numbers may arrive as ints or float64; the model weights are
    # float32, so coerce explicitly to avoid a dtype-mismatch error.
    embeddings_tensor = torch.tensor(embeddings_array, dtype=torch.float32)
    # inputs_embeds must be (batch, seq_len, hidden); add the batch
    # dimension when the caller sends a single unbatched sequence.
    if embeddings_tensor.dim() == 2:
        embeddings_tensor = embeddings_tensor.unsqueeze(0)

    model.eval()
    with torch.no_grad():
        # Pass embeddings directly; input_ids / attention_mask are simply
        # omitted rather than passed explicitly as None.
        outputs = model(inputs_embeds=embeddings_tensor)

    predictions = outputs.logits.squeeze()
    item_names = ["cohesion", "syntax", "vocabulary",
                  "phraseology", "grammar", "conventions"]
    # Scale raw logits to the 1-5 rubric range per the model card for
    # Kevintu/Engessay_grading_ML: score = 2.25 * logit + 1.25.
    # (The original used "- 1.25", which produces out-of-range scores.)
    scaled_scores = 2.25 * predictions.numpy() + 1.25
    # Snap each score to the nearest half point.
    rounded_scores = [round(score * 2) / 2 for score in scaled_scores]
    return {item: f"{score:.1f}"
            for item, score in zip(item_names, rounded_scores)}
# Create Gradio interface for embeddings input: the client POSTs a JSON
# payload of precomputed embeddings and receives the rubric scores as JSON.
demo = gr.Interface(
    fn=process_embeddings,
    inputs=gr.JSON(label="Embeddings"),
    outputs=gr.JSON(label="Scores"),
    title="Essay Grading API (Embeddings Only)",
    description="Grade essays based on precomputed embeddings"
)
# Enable request queuing, then start the web server (blocking call).
demo.queue()
demo.launch()