import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

model_id = "GannaEslam38/Pegasus-Arxiv-Generator"
| print("π Loading Model...") | |
| try: | |
| tokenizer = AutoTokenizer.from_pretrained(model_id) | |
| model = AutoModelForSeq2SeqLM.from_pretrained(model_id) | |
| print("β Model Loaded!") | |
| except Exception as e: | |
| print(f"β Error loading model: {e}") | |
def generate_text(prompt):
    print(f"Input received: {prompt}")
    # Reject inputs too short to produce a meaningful generation.
    if len(prompt.split()) < 3:
        return "Text is too short, please write a full sentence."
    try:
        # Tokenize, truncating to the model's 512-token input limit.
        inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
        summary_ids = model.generate(
            inputs["input_ids"],
            max_length=120,
            min_length=10,
            num_beams=1,  # greedy decoding; early_stopping only matters for beam search
            early_stopping=True
        )
        decoded = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        # Pegasus marks newlines with "<n>"; strip them and tidy spacing.
        cleaned_text = decoded.replace("<n>", " ").replace(" .", ".").strip()
        return cleaned_text
    except Exception as e:
        return f"Error: {str(e)}"
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, label="Input Text", placeholder="Write your topic here..."),
    outputs=gr.Textbox(lines=10, label="Generated Content"),
    title="Generative AI Project",
    description="Fine-tuned Pegasus Model.",
    cache_examples=False
)

if __name__ == "__main__":
    interface.launch()
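
# Assumed dependencies (not stated in this file): gradio, transformers, torch.
# For a Hugging Face Space, they would typically be listed in requirements.txt, e.g.:
#   gradio
#   transformers
#   torch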