Loguie committed on
Commit
d0336c8
·
verified ·
1 Parent(s): 8722a33

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -21
app.py CHANGED
@@ -5,35 +5,33 @@ import requests
5
  from PIL import Image
6
  from io import BytesIO
7
  # Import necessary libraries
8
- import torch
9
  from transformers import pipeline
10
-
11
# Check if CUDA (GPU support) is available, for faster processing if you have a GPU.
# pipeline(device=0) selects the first CUDA device; device=-1 runs on CPU.
device = 0 if torch.cuda.is_available() else -1

# Load the BART model for summarization using Hugging Face's pipeline
summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=device)
16
 
17
def summarize_text(input_text):
    """Run the BART summarization pipeline over *input_text*.

    Returns the generated summary string (150 tokens at most, at least 50),
    produced deterministically (``do_sample=False``).
    """
    result = summarizer(input_text, max_length=150, min_length=50, do_sample=False)
    return result[0]['summary_text']
22
 
23
# Example text to summarize
text_to_summarize = """
The Hugging Face team has developed several transformers architectures that excel in a variety of NLP tasks.
These architectures are pre-trained on large text corpora and can be fine-tuned for specific tasks like text classification, named entity recognition, and question answering.
The transformers library provides easy-to-use tools for downloading pre-trained models and fine-tuning them, making it simple for developers and researchers to apply state-of-the-art NLP methods to their own tasks.
Hugging Face has made it easier for people to contribute to the world of NLP research by providing open-source libraries that are well-documented and easy to use.
"""

# Calling the function to summarize the text
summary = summarize_text(text_to_summarize)

# Print the summarized text
print("Original Text: \n", text_to_summarize)
print("\nSummary: \n", summary)

# Launch the interface
# NOTE(review): `demo` is never defined in this revision, so this call raises
# NameError at import time — confirm against the full file.
demo.launch()
 
 
5
  from PIL import Image
6
  from io import BytesIO
7
  # Import necessary libraries
 
8
  from transformers import pipeline
9
+ import torch
 
 
10
 
11
# Load the BART model for summarization using Hugging Face's pipeline
# NOTE(review): no `device` argument is passed, so the pipeline defaults to
# CPU — confirm that dropping GPU selection was intended.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
13
 
14
# Prediction function
def summarize_text(input_text):
    """Summarize *input_text* with the BART summarization pipeline.

    Parameters
    ----------
    input_text : str
        Raw text entered by the user.

    Returns
    -------
    str
        The generated summary, or an error message when the input is empty.
        A plain string is returned on both paths: the original error path
        returned a dict, which ``gr.Label`` interprets as a label→confidence
        mapping (numeric values expected) and would render incorrectly.
    """
    # Check if the input text is empty (whitespace-only counts as empty).
    if not input_text.strip():
        return "Error: Please input some text"

    # Use the summarizer to generate the summary; do_sample=False keeps
    # generation deterministic, with the summary bounded to 50-150 tokens.
    summary = summarizer(input_text, max_length=150, min_length=50, do_sample=False)

    return summary[0]['summary_text']
24
 
25
# Gradio interface: a multi-line text box feeds summarize_text, whose
# string result is shown in a Label component.
text_input = gr.Textbox(label="Enter Text to Summarize", lines=5)
summary_output = gr.Label(label="Summary")

demo = gr.Interface(
    fn=summarize_text,
    inputs=text_input,
    outputs=summary_output,
    title="Text Summarizer using BART",
    description="This app uses the `facebook/bart-large-cnn` model to summarize the text you input.",
    allow_flagging="never",
)

# Launch the interface
demo.launch()
37
+