# Hugging Face Space: dialogue summarizer (FLAN-T5 base + LoRA, SAMSum)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel

# Load tokenizer, base checkpoint, and LoRA adapter at import time so the
# model is ready before the Gradio UI starts serving requests.
print("Loading model...")
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
# LoRA adapter fine-tuned on the SAMSum dialogue-summarization dataset.
model = PeftModel.from_pretrained(base_model, "noviciusss/flan-t5-base-samsum")
model.eval()  # inference mode: disables dropout
print("Model loaded successfully!")
def summarize_dialogue(dialogue, max_length=128, num_beams=4):
    """Summarize a conversational dialogue with the fine-tuned FLAN-T5 model.

    Args:
        dialogue: Multi-line conversation text ("Name: utterance" per line).
        max_length: Maximum length, in tokens, of the generated summary.
            Coerced to int because Gradio sliders may deliver floats.
        num_beams: Beam-search width; higher is slower but often better.

    Returns:
        The decoded summary string, or a prompt message if the input is
        empty/whitespace-only.
    """
    if not dialogue.strip():
        return "Please enter a dialogue to summarize."

    # FLAN-T5 is a text-to-text model; the task prefix tells it what to do.
    input_text = "summarize: " + dialogue
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        max_length=512,  # truncate long conversations to the encoder limit
        truncation=True,
    )

    # no_grad: inference only, skip building the autograd graph.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=int(max_length),
            num_beams=int(num_beams),
            early_stopping=True,
            no_repeat_ngram_size=3,  # curb verbatim phrase repetition
        )

    # Decode generated token ids back to plain text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Example inputs for the interface. Each entry supplies values for ALL
# inputs: [dialogue text, max summary length, number of beams].
examples = [
    ["""Hannah: Hey, did you see the game last night?
Adam: Yeah! That last-minute goal was incredible!
Hannah: I know right? Best match of the season so far.
Adam: Totally agree. We should watch the next game together.
Hannah: For sure! I'll bring snacks.""", 128, 4],
    ["""Mike: Can you pick up some groceries on your way home?
Sarah: Sure, what do we need?
Mike: Milk, bread, and eggs. Oh, and some coffee.
Sarah: Got it. Anything else?
Mike: That's all. Thanks!""", 128, 4],
    ["""Emma: What time is the meeting tomorrow?
John: It's at 2 PM in the conference room.
Emma: Great, I'll prepare the presentation.
John: Perfect. See you then!""", 96, 4],
]
# Build the Gradio UI: dialogue textbox plus generation-control sliders.
demo = gr.Interface(
    fn=summarize_dialogue,
    inputs=[
        gr.Textbox(
            label="Dialogue",
            placeholder="Enter a conversation to summarize...",
            lines=10,
        ),
        # Slider ranges mirror summarize_dialogue's generation parameters.
        gr.Slider(
            minimum=32,
            maximum=256,
            value=128,
            step=16,
            label="Max Summary Length",
        ),
        gr.Slider(
            minimum=1,
            maximum=8,
            value=4,
            step=1,
            label="Number of Beams",
        ),
    ],
    outputs=gr.Textbox(label="Summary", lines=3),
    examples=examples,
    cache_examples=False,  # don't pre-run the model on startup
    title="🗨️ Dialogue Summarizer",
    description="""
This app summarizes conversational dialogues using a **FLAN-T5 base** model fine-tuned with **LoRA** on the SAMSum dataset.
**Performance Metrics:**
- ROUGE-1: 49.01 | ROUGE-2: 25.06 | ROUGE-L: 40.97
- BERTScore F1: 72.25 | METEOR: 42.51
Enter a dialogue or try one of the examples below!
""",
    article="""
<div style='text-align: center; margin-top: 20px;'>
<p>Model: <a href='https://huggingface.co/noviciusss/flan-t5-base-samsum' target='_blank'>noviciusss/flan-t5-base-samsum</a></p>
<p>Built with 🤗 Transformers, PEFT, and Gradio</p>
</div>
""",
    theme=gr.themes.Soft(),
    allow_flagging="never",
)
# Launch the web server when run as a script (Spaces imports and launches
# the same `demo` object).
if __name__ == "__main__":
    demo.launch()