# assoum-bot — Update app.py (commit e1a7d15, verified)
import gradio as gr
from transformers import pipeline
import torch # We'll check if GPU is available, though CPU is likely our only option
# Load a very small, efficient Arabic model for text generation
# This model is much smaller and should fit the free CPU constraints
# NOTE(review): ALBERT checkpoints are masked-LM encoders, not causal LMs;
# transformers may warn and the generated text may be low quality with the
# "text-generation" pipeline — consider a causal Arabic model instead. Verify.
generator = pipeline("text-generation", model="asafaya/albert-base-arabic")
def generate_comeback_efficient(situation):
    """Generate a short, witty Arabic comeback for the given situation.

    Args:
        situation: User-provided Arabic text describing an awkward
            situation or question.

    Returns:
        The model-generated comeback with the prompt scaffolding stripped,
        or an empty string when the input is empty/whitespace-only.
    """
    # Guard clause: don't prompt the model with an empty situation.
    if not situation or not situation.strip():
        return ""
    # Prompt the model to provide a witty response.
    prompt = f"الموقف: {situation}\nالرد الذكي والسريع:"
    # Small max_length keeps generation cheap on the free CPU tier; sampling
    # (do_sample + top_k/top_p) yields more varied — often funnier — replies.
    result = generator(prompt, max_length=60, num_return_sequences=1, truncation=True, do_sample=True, top_k=50, top_p=0.95)
    comeback = result[0]['generated_text']
    # The pipeline echoes the prompt; keep only the text after the answer marker.
    if "الرد الذكي والسريع:" in comeback:
        comeback = comeback.split("الرد الذكي والسريع:")[-1].strip()
    return comeback
# Build the Gradio UI: one Arabic text input, one text output.
situation_box = gr.Textbox(lines=2, label="اكتب الموقف المحرج أو السؤال هنا")
comeback_box = gr.Textbox(lines=3, label="الرد الذكي!")

interface = gr.Interface(
    fn=generate_comeback_efficient,
    inputs=situation_box,
    outputs=comeback_box,
    title="قاصف الجبهات الذكي (إصدار خفيف) - بواسطة عصوم",
    description="مولد ردود ذكية ومضحكة للمواقف المحرجة، يعمل بكفاءة على المعالج المجاني! تطوير Assoum!",
)

# Start the web server (blocking call).
interface.launch()