# Jarvis AI V2 — minimal Gradio chat app backed by google/flan-t5-small.
import gradio as gr
from transformers import pipeline
# ✅ Use a lightweight, always-free model
# text2text-generation pipeline: called with a prompt string, it returns a
# list of dicts carrying a "generated_text" key (see `chat` below).
# flan-t5-small is small enough to run on CPU-only free hosting tiers.
jarvis = pipeline("text2text-generation", model="google/flan-t5-small")
# Function to handle messages (compatible with new Gradio)
def chat(message, history):
    """Generate Jarvis's reply to *message* given the running chat history.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[dict]
        Prior turns in Gradio "messages" format: each entry is a single
        ``{"role": "user"|"assistant", "content": str}`` dict.

    Returns
    -------
    str
        The assistant's reply. ``gr.ChatInterface`` itself appends the user
        message and this reply to the history, so we must not mutate
        ``history`` or return it.
    """
    # Rebuild a plain-text transcript from the messages-format history.
    # Each entry is one role-tagged message — there is no per-turn
    # "response" key, so label lines by role instead.
    lines = []
    for turn in history:
        speaker = "User" if turn.get("role") == "user" else "Jarvis"
        lines.append(f"{speaker}: {turn.get('content', '')}")
    lines.append(f"User: {message}\nJarvis:")
    context = "\n".join(lines)

    # Generate the reply (sampled, capped at 128 new tokens).
    response = jarvis(context, max_new_tokens=128, temperature=0.7, do_sample=True)
    reply = response[0]["generated_text"]

    # ChatInterface expects just the reply string; returning ("", history)
    # (the gr.Blocks pattern) breaks rendering here.
    return reply
# Gradio Chat Interface (new style)
# type="messages" makes Gradio pass and store history as a list of
# {"role", "content"} dicts rather than (user, bot) tuples.
gr.ChatInterface(
    fn=chat,
    title="Jarvis AI V2",
    description="Your personal AI assistant — accessible anywhere in the world.",
    theme="soft",
    type="messages",  # 👈 important to avoid tuple errors
).launch(share=True)  # share=True exposes a temporary public gradio.live URL