GannaEslam38 committed on
Commit
5abbf36
·
verified ·
1 Parent(s): 4f07e78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -27
app.py CHANGED
@@ -1,40 +1,50 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
3
 
4
  model_id = "GannaEslam38/Pegasus-Arxiv-Generator"
5
 
6
- print("Loading model...")
7
- tokenizer = AutoTokenizer.from_pretrained(model_id)
8
- model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
 
 
 
 
9
 
10
  def generate_text(prompt):
11
-
12
- inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
13
-
14
-
15
- summary_ids = model.generate(
16
- inputs["input_ids"],
17
- max_length=150,
18
- min_length=30,
19
- length_penalty=2.0,
20
- num_beams=4,
21
- early_stopping=True
22
- )
23
-
24
-
25
- decoded = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
26
- cleaned_text = decoded.replace("<n>", " ").replace(" .", ".").strip()
27
-
28
- return cleaned_text
29
-
30
 
31
  interface = gr.Interface(
32
  fn=generate_text,
33
- inputs=gr.Textbox(lines=5, label="Input Text", placeholder="Write about AI..."),
34
- outputs=gr.Textbox(label="Generated Content"),
 
 
 
35
  title="Generative AI Project",
36
- description="This model generates academic-style content using fine-tuned Pegasus.",
37
- examples=[["Artificial intelligence is transforming the world"]]
 
38
  )
39
 
40
- interface.launch()
 
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+ import torch
4
 
5
  model_id = "GannaEslam38/Pegasus-Arxiv-Generator"
6
 
7
+ print("🔄 Loading Model...")
8
+ try:
9
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
10
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
11
+ print("✅ Model Loaded!")
12
+ except Exception as e:
13
+ print(f"❌ Error loading model: {e}")
14
 
15
  def generate_text(prompt):
16
+ print(f"📩 Input received: {prompt}")
17
+ try:
18
+ inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
19
+
20
+ summary_ids = model.generate(
21
+ inputs["input_ids"],
22
+ max_length=120,
23
+ min_length=10,
24
+ num_beams=1,
25
+ early_stopping=True
26
+ )
27
+
28
+ decoded = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
29
+ cleaned_text = decoded.replace("<n>", " ").replace(" .", ".").strip()
30
+
31
+ return cleaned_text
32
+
33
+ except Exception as e:
34
+ return f"Error: {str(e)}"
35
 
36
  interface = gr.Interface(
37
  fn=generate_text,
38
+
39
+ inputs=gr.Textbox(lines=5, label="Input Text", placeholder="Write your topic here..."),
40
+
41
+ outputs=gr.Textbox(lines=10, label="Generated Content"),
42
+
43
  title="Generative AI Project",
44
+ description="Fine-tuned Pegasus Model.",
45
+ examples=[["Artificial intelligence is transforming the world"]],
46
+ cache_examples=False
47
  )
48
 
49
+ if __name__ == "__main__":
50
+ interface.launch()