# NOTE(review): the original file began with non-code residue scraped from the
# Hugging Face Spaces file viewer (status badges, file size, commit hashes,
# and a line-number gutter). It has been replaced by this comment so the file
# is valid Python.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
# Grammar-correction checkpoints available on the Hugging Face Hub, keyed by
# the human-readable label shown in the UI dropdown. Insertion order matters:
# it is the order the dropdown displays.
MODEL_OPTIONS = {
    "Prithivida GEC v1": "prithivida/grammar_error_correcter_v1",
    "Hassaanik GEC": "hassaanik/grammar-correction-model",
    "Vennify T5 GEC": "vennify/t5-base-grammar-correction",
}

# Pipelines already built during this process, keyed by model id, so each
# model is downloaded and loaded at most once.
loaded_pipelines = {}
def get_pipeline(model_id):
    """Return a cached text2text-generation pipeline for ``model_id``.

    The tokenizer/model pair is loaded on first request and memoised in the
    module-level ``loaded_pipelines`` dict, so later calls for the same model
    id are cheap.
    """
    cached = loaded_pipelines.get(model_id)
    if cached is None:
        tok = AutoTokenizer.from_pretrained(model_id)
        mdl = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        cached = pipeline("text2text-generation", model=mdl, tokenizer=tok)
        loaded_pipelines[model_id] = cached
    return cached
def oxford_polish(sentence: str, model_choice: str) -> str:
    """Correct ``sentence`` with the Hub model selected by ``model_choice``.

    ``model_choice`` must be a key of ``MODEL_OPTIONS``; the instruction
    prompt asks the model for Oxford-style formal English before appending
    the user's sentence. Returns the model's generated text, stripped.
    """
    polisher = get_pipeline(MODEL_OPTIONS[model_choice])
    # Instruction prefix + user sentence, fed as a single text2text prompt.
    prompt = (
        "Correct this sentence into formal written English, following the Oxford University Style Guide. "
        "Ensure tense matches time expressions (e.g. 'tomorrow' → future, 'yesterday' → past), "
        "use British spelling, apply the Oxford comma, and correct uncountable nouns naturally. "
        "Sentence: " + sentence
    )
    # Greedy decoding (do_sample=False) keeps corrections deterministic.
    result = polisher(prompt, max_new_tokens=80, do_sample=False)
    return result[0]["generated_text"].strip()
# Wire up the Gradio UI: a sentence box plus a model picker in, the
# corrected sentence out.
demo = gr.Interface(
    fn=oxford_polish,
    title="Oxford Grammar Polisher",
    description="Test multiple free grammar correction models from Hugging Face Hub with Oxford-grammar rules.",
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter a sentence to correct..."),
        gr.Dropdown(
            choices=list(MODEL_OPTIONS.keys()),
            value="Prithivida GEC v1",
            label="Choose Model",
        ),
    ],
    outputs=gr.Textbox(label="Oxford-style Correction"),
)

demo.launch()