DemoDACMini / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
MODEL_NAME = "Mattimax/DACMini-IT"

# Load the tokenizer and model once at startup; use the GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)
def chat_fn(message, history):
    # `history` is required by gr.ChatInterface's signature but is not used here:
    # each turn is answered independently of the previous messages.
    inputs = tokenizer(message, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
        )
    # Decode only the newly generated tokens so the prompt is not echoed back.
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(generated, skip_special_tokens=True)
    response = response.replace("<|assistant|>", "").replace("<|user|>", "").strip()
    return response
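
# A minimal sketch (not part of the original app, and not wired into the UI below):
# if the DACMini-IT tokenizer ships a chat template, the conversation history that
# gr.ChatInterface passes in could be folded into the prompt as shown here. The
# function name and the assumption that `history` arrives as (user, assistant)
# tuples are hypothetical.
def chat_fn_with_history(message, history):
    messages = []
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    # apply_chat_template builds the prompt with the model's own special tokens.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_new_tokens=150,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
        )
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True).strip()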
demo = gr.ChatInterface(
    fn=chat_fn,
    title="💬 Demo DACMini-IT",
    description="Una semplice demo del modello italiano DACMini-IT. Scrivi un messaggio e il modello risponde.",
    theme="soft",
    examples=[
        "Ciao, come stai?",
        "Raccontami una curiosità sulla lingua italiana.",
        "Scrivi una breve poesia."
    ]
)
if __name__ == "__main__":
    demo.launch()