# text-assistant / app.py
# Hugging Face Space by Hamza4100 — commit 876ff30 (verified)
import gradio as gr
from transformers import pipeline
# Load a small instruction-tuned seq2seq model (FLAN-T5) once at module import;
# all three task helpers below share this single pipeline instance.
text_generator = pipeline("text2text-generation", model="google/flan-t5-small")
# --- Helper functions ---
def summarize_text(text):
    """Summarize *text* in roughly three sentences with the FLAN-T5 pipeline.

    Args:
        text: The source text to summarize.

    Returns:
        The generated summary string, or a short prompt-the-user message
        when *text* is empty/whitespace (avoids invoking the model on an
        empty prompt, which previously produced meaningless output).
    """
    if not text or not text.strip():
        return "Please enter some text to summarize."
    prompt = f"Summarize in 3 sentences: {text}"
    # do_sample=False -> greedy decoding, so the same input gives the same summary.
    result = text_generator(prompt, max_length=100, do_sample=False)
    return result[0]['generated_text']
def answer_question(topic, question):
    """Answer *question* in the context of *topic* with the FLAN-T5 pipeline.

    Args:
        topic: Free-text topic giving context for the question.
        question: The question to answer.

    Returns:
        The model's answer string, or a short prompt-the-user message when
        *question* is empty/whitespace (previously the model was invoked
        on an effectively empty question and returned junk).
    """
    if not question or not question.strip():
        return "Please enter a question."
    prompt = f"Topic: {topic}\nAnswer this question: {question}"
    # do_sample=False -> deterministic answers for identical inputs.
    result = text_generator(prompt, max_length=150, do_sample=False)
    return result[0]['generated_text']
def generate_story(theme):
    """Generate a short (~200-word) story about *theme*.

    Sampling is enabled (do_sample=True), so repeated calls with the same
    theme can produce different stories.
    """
    story_prompt = f"Write a 200-word story about: {theme}"
    outputs = text_generator(story_prompt, max_length=300, do_sample=True)
    return outputs[0]['generated_text']
# --- Gradio UI ---
with gr.Blocks() as demo:
    gr.Markdown("# Mini AI Text Assistant")
    task = gr.Radio(["Summarizer", "Q&A Bot", "Story Generator"], label="Choose a task")
    input_text = gr.Textbox(label="Enter text or topic/theme", placeholder="Type here...")
    # Hidden by default; revealed only when the Q&A task is selected (see task.change below).
    input_question = gr.Textbox(label="Enter question (for Q&A only)", visible=False)
    output = gr.Textbox(label="Output")

    def process(task_choice, text, question):
        """Dispatch the button click to the helper matching the selected task."""
        if task_choice == "Summarizer":
            return summarize_text(text)
        elif task_choice == "Q&A Bot":
            return answer_question(text, question)
        elif task_choice == "Story Generator":
            return generate_story(text)
        # Bug fix: previously fell through and returned None (blank output box)
        # when the user clicked Run without choosing a task.
        return "Please choose a task first."

    # Toggle the question box's visibility based on the selected task.
    task.change(lambda t: gr.update(visible=(t == "Q&A Bot")), inputs=task, outputs=input_question)

    btn = gr.Button("Run")
    btn.click(process, inputs=[task, input_text, input_question], outputs=output)

demo.launch()