"""Gradio demo: compare free grammar-correction models with an Oxford-style prompt."""

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Available models (display label -> Hugging Face Hub model id).
MODEL_OPTIONS = {
    "Prithivida GEC v1": "prithivida/grammar_error_correcter_v1",
    "Hassaanik GEC": "hassaanik/grammar-correction-model",
    "Vennify T5 GEC": "vennify/t5-base-grammar-correction",
}

# Cache loaded pipelines so we don't reload weights on every request.
loaded_pipelines: dict[str, object] = {}


def get_pipeline(model_id: str):
    """Return a cached text2text-generation pipeline for *model_id*.

    The tokenizer and model are downloaded/loaded only on first use;
    subsequent calls reuse the cached pipeline.
    """
    if model_id not in loaded_pipelines:
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        loaded_pipelines[model_id] = pipeline(
            "text2text-generation", model=model, tokenizer=tokenizer
        )
    return loaded_pipelines[model_id]


def oxford_polish(sentence: str, model_choice: str) -> str:
    """Correct *sentence* into formal Oxford-style English using the chosen model.

    Parameters
    ----------
    sentence : str
        User-supplied sentence to correct.
    model_choice : str
        A key of ``MODEL_OPTIONS`` selecting which Hub model to use.

    Returns
    -------
    str
        The model's corrected sentence (stripped), or an empty string for
        blank input.
    """
    # Guard: don't pay a full generation round trip for empty input.
    if not sentence or not sentence.strip():
        return ""

    model_id = MODEL_OPTIONS[model_choice]
    polisher = get_pipeline(model_id)

    # NOTE(review): some of these checkpoints (e.g. vennify/t5-base-grammar-correction)
    # were fine-tuned with a short task prefix such as "grammar: " rather than a
    # long natural-language instruction — verify against each model card; a verbose
    # prompt may degrade output for non-instruction-tuned seq2seq models.
    prompt = (
        "Correct this sentence into formal written English, following the Oxford University Style Guide. "
        "Ensure tense matches time expressions (e.g. 'tomorrow' → future, 'yesterday' → past), "
        "use British spelling, apply the Oxford comma, and correct uncountable nouns naturally. "
        "Sentence: " + sentence
    )
    # Deterministic decoding (do_sample=False) so corrections are reproducible.
    out = polisher(prompt, max_new_tokens=80, do_sample=False)
    return out[0]["generated_text"].strip()


# Gradio interface wiring: two inputs (sentence, model choice) -> one text output.
demo = gr.Interface(
    fn=oxford_polish,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter a sentence to correct..."),
        gr.Dropdown(
            choices=list(MODEL_OPTIONS.keys()),
            value="Prithivida GEC v1",
            label="Choose Model",
        ),
    ],
    outputs=gr.Textbox(label="Oxford-style Correction"),
    title="Oxford Grammar Polisher",
    description="Test multiple free grammar correction models from Hugging Face Hub with Oxford-grammar rules.",
)

# Only launch the server when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()