# Hugging Face Space: BART-Large-CNN summarization demo (page-header residue removed)
import gradio as gr
from transformers import pipeline
import torch
import os

# --- 1. Model Loading (Global Scope) ---
MODEL_NAME = "facebook/bart-large-cnn"

# Run on GPU (device index 0) when available, otherwise CPU (-1).
device = 0 if torch.cuda.is_available() else -1

# Build the pipeline once at import time: the expensive load happens a
# single time when the Space boots, not on every request.
try:
    print(f"Loading model: {MODEL_NAME} on device: {device}")
    # The "summarization" pipeline wraps tokenization + generation.
    summarizer = pipeline("summarization", model=MODEL_NAME, device=device)
    if not summarizer:
        raise Exception("Summarization pipeline failed to initialize.")
except Exception as e:
    # Loading can fail on the free tier (e.g. out of memory); keep the app
    # alive and let the UI report the failure instead of crashing.
    print(f"FATAL ERROR: Failed to load model {MODEL_NAME}. Error: {e}")
    summarizer = None
| # --- 2. Gradio Prediction Function --- | |
def summarize_text(text_to_summarize, min_length, max_length):
    """Generate an abstractive summary of *text_to_summarize*.

    Called by the Gradio interface.

    Args:
        text_to_summarize: Source text; must be at least 100 characters.
        min_length: Minimum summary length in tokens (slider value).
        max_length: Maximum summary length in tokens (slider value).

    Returns:
        The summary string on success, or a human-readable error message.
    """
    if summarizer is None:
        return "Model failed to load. Please check the Space logs for details (potential memory issue)."
    if not text_to_summarize or len(text_to_summarize) < 100:
        return "Please enter a longer text (minimum 100 characters) to summarize."
    # FIX: Gradio sliders may deliver floats; the generation API expects
    # integer token counts, so cast before validating and calling.
    min_length = int(min_length)
    max_length = int(max_length)
    if max_length <= min_length:
        return "Error: Maximum summary length must be greater than minimum length."
    try:
        summary = summarizer(
            text_to_summarize,
            min_length=min_length,
            max_length=max_length,
            do_sample=False,  # greedy decoding for reproducible output
        )
        # The pipeline returns a list of dicts; extract the generated text.
        return summary[0]['summary_text']
    except Exception as e:
        # Surface the error in the UI rather than crashing the Space.
        return f"An error occurred during summarization: {e}"
# --- 3. Gradio Interface Setup ---

# Input widgets: the article text plus two sliders bounding the summary size.
input_text = gr.Textbox(
    label="Text to Summarize",
    lines=10,
    placeholder="Paste a long article here (e.g., news, literature, etc.).",
    value="The rapid advancement of artificial intelligence has led to a major shift in how businesses operate. Companies are leveraging large language models to automate customer service, generate marketing content, and streamline data analysis. This technological revolution requires a new focus on data privacy and ethical AI development to ensure that these powerful tools are used responsibly and equitably across the globe. Experts predict that within the next five years, AI will be an integral part of nearly every sector, from healthcare to finance.",
)
min_len_slider = gr.Slider(minimum=10, maximum=50, value=20, step=5, label="Minimum Summary Length (tokens)")
max_len_slider = gr.Slider(minimum=50, maximum=200, value=100, step=10, label="Maximum Summary Length (tokens)")

# Output widget holding the generated summary.
output_text = gr.Textbox(label="Generated Summary", lines=5)

# Wire the prediction function to the widgets and start the app server.
demo = gr.Interface(
    fn=summarize_text,
    inputs=[input_text, min_len_slider, max_len_slider],
    outputs=output_text,
    title="📰 Hugging Face Summarization with BART-Large-CNN (Gradio)",
    description="This demo uses the 'facebook/bart-large-cnn' model to generate a concise summary from the input text.",
)
demo.launch()