Sunder34m2010 committed on
Commit
ffb50bf
·
verified ·
1 Parent(s): db5ff46

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+ import torch
4
+ import os
5
+
6
# --- 1. Model Loading (Global Scope) ---

MODEL_NAME = "facebook/bart-large-cnn"

# Run on GPU (device index 0) when CUDA is present; -1 selects the CPU.
device = 0 if torch.cuda.is_available() else -1

# Build the pipeline once at import time: this expensive step runs only when
# the Space boots, instead of on every request.
try:
    print(f"Loading model: {MODEL_NAME} on device: {device}")
    # The "summarization" pipeline wraps tokenization + generation for us.
    summarizer = pipeline("summarization", model=MODEL_NAME, device=device)
    if not summarizer:
        raise Exception("Summarization pipeline failed to initialize.")
except Exception as e:
    # Loading can fail on memory-constrained hardware (e.g. the free tier);
    # record the failure and leave a sentinel so the UI reports it gracefully.
    print(f"FATAL ERROR: Failed to load model {MODEL_NAME}. Error: {e}")
    summarizer = None
31
# --- 2. Gradio Prediction Function ---

def summarize_text(text_to_summarize, min_length, max_length):
    """Generate an abstractive summary of the input text.

    Called by the Gradio interface on each request.

    Args:
        text_to_summarize: Source text; must be at least 100 characters.
        min_length: Minimum summary length in tokens. Gradio sliders may
            deliver floats, so the value is coerced to int before use.
        max_length: Maximum summary length in tokens; must exceed min_length.

    Returns:
        The summary string on success, or a human-readable error message
        explaining why summarization could not be performed.
    """
    if summarizer is None:
        return "Model failed to load. Please check the Space logs for details (potential memory issue)."

    if not text_to_summarize or len(text_to_summarize) < 100:
        return "Please enter a longer text (minimum 100 characters) to summarize."

    # The underlying generate() call expects integer token counts, but Gradio
    # sliders can emit floats — coerce before comparing and generating.
    min_length = int(min_length)
    max_length = int(max_length)

    # Ensure max_length is greater than min_length
    if max_length <= min_length:
        return "Error: Maximum summary length must be greater than minimum length."

    try:
        # Call the summarizer pipeline
        summary = summarizer(
            text_to_summarize,
            min_length=min_length,
            max_length=max_length,
            do_sample=False,   # greedy decoding for consistent results
            truncation=True,   # clip inputs past the model's max length instead of erroring
        )
        # The pipeline returns a list of dicts; extract the generated text.
        return summary[0]['summary_text']
    except Exception as e:
        # Surface the failure in the UI rather than crashing the Space.
        return f"An error occurred during summarization: {e}"
+
62
+ # --- 3. Gradio Interface Setup ---
63
+
64
+ # Define the input components
65
+ input_text = gr.Textbox(
66
+ label="Text to Summarize",
67
+ lines=10,
68
+ placeholder="Paste a long article here (e.g., news, literature, etc.).",
69
+ value="The rapid advancement of artificial intelligence has led to a major shift in how businesses operate. Companies are leveraging large language models to automate customer service, generate marketing content, and streamline data analysis. This technological revolution requires a new focus on data privacy and ethical AI development to ensure that these powerful tools are used responsibly and equitably across the globe. Experts predict that within the next five years, AI will be an integral part of nearly every sector, from healthcare to finance."
70
+ )
71
+
72
+ min_len_slider = gr.Slider(
73
+ minimum=10,
74
+ maximum=50,
75
+ value=20,
76
+ step=5,
77
+ label="Minimum Summary Length (tokens)"
78
+ )
79
+
80
+ max_len_slider = gr.Slider(
81
+ minimum=50,
82
+ maximum=200,
83
+ value=100,
84
+ step=10,
85
+ label="Maximum Summary Length (tokens)"
86
+ )
87
+
88
+ # Define the output component
89
+ output_text = gr.Textbox(
90
+ label="Generated Summary",
91
+ lines=5
92
+ )
93
+
94
+ # Create the Gradio Interface
95
+ gr.Interface(
96
+ fn=summarize_text,
97
+ inputs=[input_text, min_len_slider, max_len_slider],
98
+ outputs=output_text,
99
+ title="📰 Hugging Face Summarization with BART-Large-CNN (Gradio)",
100
+ description="This demo uses the 'facebook/bart-large-cnn' model to generate a concise summary from the input text."
101
+ ).launch()