File size: 1,404 Bytes
099c618
 
1dc1e8c
 
e69cfce
 
 
 
4a746b5
 
 
 
 
 
 
e69cfce
 
b7984b2
 
 
4a746b5
 
 
b7984b2
4a746b5
 
e69cfce
099c618
e69cfce
4a746b5
 
493c901
b7984b2
 
6e54c4c
b7984b2
e69cfce
 
 
b7984b2
4a746b5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import gradio as gr
from transformers import pipeline
import torch

# Decide the inference device once at import time:
# CUDA GPU (device index 0) when available, otherwise CPU (-1).
use_cuda = torch.cuda.is_available()
device = 0 if use_cuda else -1
# FP16 is only beneficial (and safe) on GPU; keep full float32 precision on CPU.
torch_dtype = torch.float16 if use_cuda else torch.float32

# Instruction-tuned seq2seq model driven by an in-line prompt (see summarize_text).
summarization = pipeline(
    "text2text-generation",
    model="google/flan-t5-base",
    device=device,
    torch_dtype=torch_dtype,
)

def summarize_text(text):
    """Summarize *text* with the FLAN-T5 text2text pipeline.

    Parameters
    ----------
    text : str or None
        Raw user input from the Gradio textbox. May be ``None`` (cleared
        component) or blank.

    Returns
    -------
    str
        The generated summary, or a guidance message when the input is
        missing or shorter than 30 words.
    """
    # Guard first: Gradio can pass None, which would crash on .split().
    # An empty/whitespace string also falls through to the guidance message.
    if not text or len(text.split()) < 30:
        return "Please provide a longer text (at least 30 words) for better summarization."

    # FLAN-T5 is instruction-tuned, so the task is stated directly in the prompt.
    prompt = f"Summarize the following text in 3 to 5 clear and concise sentences, highlighting the main points and key takeaways:\n\n{text}"

    # Greedy decoding (do_sample=False) keeps the output deterministic;
    # max_length caps the generated sequence at 256 tokens.
    output = summarization(prompt, max_length=256, do_sample=False)
    return output[0]['generated_text']

# ---- Gradio user interface ----
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Instruction-based Text Summarization (FLAN-T5)")

    with gr.Tab("Text Summarization"):
        # Input area, trigger button, and read-only result box, wired
        # so the button routes the textbox content through summarize_text.
        input_box = gr.Textbox(label="Text to Summarize", lines=10, placeholder="Paste long text here...")
        run_button = gr.Button("Summarize")
        result_box = gr.Textbox(label="Summary")
        run_button.click(fn=summarize_text, inputs=input_box, outputs=result_box)

# Start the web server (blocking call).
demo.launch()