AItool committed on
Commit
abfe8c9
·
verified ·
1 Parent(s): b9591bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -32
app.py CHANGED
@@ -1,36 +1,52 @@
1
# app.py — legacy version: single-tab Oxford-style grammar corrector.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

model_id = "deep-learning-analytics/GrammarCorrector"

# Tokenizer + seq2seq model, wrapped in a text2text-generation pipeline.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
polisher = pipeline("text2text-generation", model=model, tokenizer=tokenizer)


def oxford_polish_strict(sentence: str) -> str:
    """Rewrite *sentence* as formal written English following Oxford style rules."""
    instruction = (
        "Correct this sentence into formal written English, following the Oxford University Style Guide. "
        "Ensure tense matches time expressions (e.g. 'tomorrow' → future, 'yesterday' → past), "
        "use British spelling, apply the Oxford comma, and correct uncountable nouns naturally. "
        "Sentence: " + sentence
    )
    # Greedy decoding (do_sample=False) keeps corrections deterministic.
    generations = polisher(instruction, max_new_tokens=80, do_sample=False)
    return generations[0]["generated_text"].strip()


# Gradio front-end: one textbox in, one corrected sentence out.
demo = gr.Interface(
    fn=oxford_polish_strict,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence to correct..."),
    outputs=gr.Textbox(label="Oxford-style Correction"),
    title="Oxford Grammar Polisher",
    description="Rewrite sentences in formal written English using Oxford grammar rules. Powered by GrammarCorrector (T5-base)."
)

demo.launch()
 
 
 
1
# app.py — setup: load Stable Diffusion (img2img) and a T5 grammar-correction model.

import gradio as gr
from diffusers import StableDiffusionImg2ImgPipeline
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Prefer the GPU when one is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Stable Diffusion pipeline used by the image style-transfer tab.
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5"
).to(device)
# Attention slicing trades a little speed for a smaller peak memory footprint.
sd_pipe.enable_attention_slicing()

# Seq2seq model used by the text-correction tab.
model_name = "vennify/t5-base-grammar-correction"  # or "prithivida/grammar_error_correcter_v1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
21
def style_transfer(input_image, prompt, strength=0.5, guidance=7.5):
    """Restyle *input_image* according to a text prompt via SD img2img.

    Args:
        input_image: PIL image to restyle (any mode; normalised to RGB here).
        prompt: text description of the desired style.
        strength: how strongly the input is altered (0 = unchanged, 1 = ignore input).
        guidance: classifier-free guidance scale passed to the pipeline.

    Returns:
        The first generated PIL image.
    """
    # Gradio's gr.Image(type="pil") can deliver RGBA (PNG uploads) or grayscale
    # images; the Stable Diffusion pipeline expects 3-channel RGB input.
    input_image = input_image.convert("RGB")
    result = sd_pipe(
        prompt=prompt,
        image=input_image,
        strength=strength,
        guidance_scale=guidance,
    ).images[0]
    return result
29
+
30
def correct_text(text):
    """Return a grammar-corrected version of *text*.

    NOTE: the loaded model, vennify/t5-base-grammar-correction, was trained
    with the "grammar: " task prefix. The previous "gec: " prefix belongs to
    prithivida/grammar_error_correcter_v1 and degrades output for this model.
    """
    # Avoid running generation on empty/whitespace input.
    if not text or not text.strip():
        return ""
    inputs = tokenizer("grammar: " + text, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_length=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
34
+
35
# --- Gradio UI ---
# Two tabs: image style transfer (SD img2img) and text grammar correction.
with gr.Blocks() as demo:
    gr.Markdown("## 🎨 Style Transfer + ✍️ Grammar Correction")

    with gr.Tab("Image Style Transfer"):
        img_in = gr.Image(type="pil")
        prompt = gr.Textbox(label="Style Prompt")
        # Expose style_transfer's strength/guidance parameters — previously the
        # UI never passed them, so they were always stuck at their defaults.
        strength = gr.Slider(0.1, 1.0, value=0.5, step=0.05, label="Strength")
        guidance = gr.Slider(1.0, 15.0, value=7.5, step=0.5, label="Guidance Scale")
        img_out = gr.Image()
        btn1 = gr.Button("Transfer Style")
        btn1.click(
            style_transfer,
            inputs=[img_in, prompt, strength, guidance],
            outputs=img_out,
        )

    with gr.Tab("Text Correction"):
        txt_in = gr.Textbox(label="Enter text")
        txt_out = gr.Textbox(label="Corrected text")
        btn2 = gr.Button("Correct")
        btn2.click(correct_text, inputs=txt_in, outputs=txt_out)

demo.launch()