# Hugging Face Spaces page header (Space status: Sleeping) — scrape residue, kept as a comment.
import os

import gradio as gr
from transformers import BartTokenizer, BartForConditionalGeneration

# The token is only needed when the model repository is private or gated.
hf_token = os.getenv("HF_TOKEN")

# Hub repository hosting the fine-tuned BART summarisation checkpoint.
model_name = "iimran/SAM-TheSummariserV2"

# `token=` is the current keyword; it supersedes the deprecated `use_auth_token=`.
tokenizer = BartTokenizer.from_pretrained(model_name, token=hf_token)
model = BartForConditionalGeneration.from_pretrained(model_name, token=hf_token)
def summarize(input_text):
    """Summarise *input_text* using the BART model loaded at module level.

    Parameters
    ----------
    input_text : str
        Arbitrary text; anything beyond the model's 1024-token limit is
        truncated before generation.

    Returns
    -------
    str
        The decoded summary, or an empty string for blank input.
    """
    # Guard clause: generating from an empty prompt wastes a forward pass
    # and produces a meaningless "summary".
    if not input_text or not input_text.strip():
        return ""
    # Tokenize with truncation so the input never exceeds BART's 1024-token window.
    inputs = tokenizer(input_text, max_length=1024, truncation=True, return_tensors="pt")
    # FIX: forward the attention mask alongside the ids — the original dropped it,
    # which raises a transformers warning and can degrade generation quality.
    summary_ids = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        num_beams=4,         # beam search (4 beams) for higher-quality summaries
        max_length=128,      # cap on generated summary length
        early_stopping=True  # stop once all beams have emitted EOS
    )
    # Strip special tokens when decoding back to a plain string.
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
# Assemble the Gradio UI: one text box in, one text box out.
input_box = gr.Textbox(
    label="Enter Text to Summarize",
    lines=10,
    placeholder="Paste or type the text you want to summarize here...",
)
output_box = gr.Textbox(
    label="Summary",
    lines=5,
    placeholder="Summary will appear here...",
)

iface = gr.Interface(
    fn=summarize,
    inputs=input_box,
    outputs=output_box,
    title="SAM - The Summariser",
    description="SAM is a model that summarizes large texts into concise summaries.",
)

# Start the web app.
iface.launch()