Alexvatti committed on
Commit
b7984b2
·
verified ·
1 Parent(s): fed13b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -19
app.py CHANGED
@@ -1,40 +1,32 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
- import spaces
4
  import torch
5
 
6
- zero = torch.Tensor([0]).cuda()
7
- print(zero.device)
8
-
9
-
10
  # Check if GPU is available for FP16 inference
11
  device = 0 if torch.cuda.is_available() else -1
12
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
13
 
14
- # Load Pipelines with FP16 (if GPU available)
15
- summarization = pipeline("summarization", model="facebook/bart-large-cnn", device=device)
16
-
17
 
18
- @spaces.GPU
19
  def summarize_text(text):
 
 
 
 
20
  output = summarization(text, max_length=100, min_length=30, do_sample=False)
21
  return output[0]['summary_text']
22
 
23
-
24
-
25
-
26
  # Gradio Interface
27
  with gr.Blocks() as demo:
28
- gr.Markdown("# 🤖 Transformers Pipeline with FP16 Inference")
29
 
30
-
31
  with gr.Tab("Text Summarization"):
32
- summary_input = gr.Textbox(label="Text to Summarize ", lines=5, placeholder="Paste long text here...")
33
- summary_btn = gr.Button("Summarize")
34
  summary_output = gr.Textbox(label="Summary")
35
- summary_btn.click(summarize_text, inputs=summary_input, outputs=summary_output)
36
-
37
-
38
 
39
  # Launch App
40
  demo.launch()
 
 
1
  import gradio as gr
2
  from transformers import pipeline
 
3
  import torch
4
 
 
 
 
 
5
# Check once whether CUDA is available; run FP16 on GPU (device 0),
# otherwise FP32 on CPU. Hoisted into a single call instead of querying
# torch.cuda.is_available() twice.
_use_cuda = torch.cuda.is_available()
device = 0 if _use_cuda else -1
torch_dtype = torch.float16 if _use_cuda else torch.float32

# Load Summarization Pipeline once at module import (the model is
# downloaded/cached by transformers on first use).
summarization = pipeline(
    "summarization",
    model="facebook/bart-large-cnn",
    device=device,
    torch_dtype=torch_dtype,
)
12
def summarize_text(text, min_words=30):
    """Summarize *text* with the module-level BART summarization pipeline.

    Parameters
    ----------
    text : str
        The text to summarize.
    min_words : int, optional
        Minimum number of whitespace-separated words required before the
        summarizer is invoked. Defaults to 30, preserving the original
        hard-coded threshold.

    Returns
    -------
    str
        The model's summary, or a guidance message when the input is
        shorter than ``min_words`` words.
    """
    # Guard: very short inputs yield poor summaries, so bail out early
    # without touching the (potentially expensive) model call.
    if len(text.split()) < min_words:
        return f"Please provide a longer text (at least {min_words} words) for better summarization."

    output = summarization(text, max_length=100, min_length=30, do_sample=False)
    return output[0]['summary_text']
19
 
 
 
 
20
# ---- Gradio interface wiring ----
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Text Summarization with FP16 Inference")

    with gr.Tab("Text Summarization"):
        box_in = gr.Textbox(label="Text to Summarize", lines=10, placeholder="Paste long text here...")
        box_out = gr.Textbox(label="Summary")
        run_btn = gr.Button("Summarize")
        # Button click feeds the input textbox through summarize_text
        # and writes the result into the output textbox.
        run_btn.click(fn=summarize_text, inputs=box_in, outputs=box_out)

# Launch App
demo.launch()
32
+