# app.py
"""Gradio demo: rewrite sentences in formal Oxford-style English with Falcon-7B-Instruct."""

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "tiiuae/falcon-7b-instruct"

# Load tokenizer and model; trust_remote_code is required for Falcon's custom
# model code, and torch_dtype="auto" picks the checkpoint's native precision.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype="auto")

# Create the text-generation pipeline on CPU (device=-1).
# NOTE(review): original comment said "no use_cache to avoid past_key_values
# crash", but no use_cache argument appears here — confirm whether it is needed.
polisher = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)


def oxford_polish_strict(sentence: str) -> str:
    """Rewrite *sentence* in formal written English per Oxford style.

    Builds an instruction prompt, generates up to 80 new tokens greedily
    (do_sample=False), and returns the text following the final
    "Corrected:" marker, stripped of surrounding whitespace.
    """
    prompt = (
        "You are an Oxford grammar professor. "
        "Rewrite the following sentence in formal written English, following the Oxford University Style Guide. "
        "Ensure tense matches time expressions (e.g. 'tomorrow' → future, 'yesterday' → past), "
        "use British spelling, apply the Oxford comma, and correct uncountable nouns naturally.\n\n"
        f"Sentence: {sentence}\n\nCorrected:"
    )
    out = polisher(prompt, max_new_tokens=80, do_sample=False)
    # The generated text echoes the prompt; keep only what follows the last
    # "Corrected:" marker (the model's rewrite).
    return out[0]["generated_text"].split("Corrected:")[-1].strip()


# Gradio interface.
# BUG FIX: original passed fn=oxford_polish, which is undefined (the function
# is named oxford_polish_strict) and raised NameError at startup.
demo = gr.Interface(
    fn=oxford_polish_strict,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence to correct..."),
    outputs=gr.Textbox(label="Oxford-style Correction"),
    title="Oxford Grammar Polisher",
    description="Rewrite sentences in formal written English using Oxford grammar rules. Powered by Falcon-7B-Instruct.",
)

# Launch only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()