InferenceLLM / app.py
edersonmelo's picture
Add application file
b2d61f6
raw
history blame contribute delete
373 Bytes
import gradio as gr
from transformers import pipeline
# Text-generation pipeline backed by the Qwen2.5-7B-Instruct model.
# NOTE(review): this runs at import time, so loading the (multi-GB) model
# weights blocks module import — expected for a Gradio Space entry point.
pipe = pipeline("text-generation",
model="Qwen/Qwen2.5-7B-Instruct")
def gerar(texto):
    """Generate a continuation of *texto* with the module-level Qwen pipeline.

    Returns the pipeline's ``generated_text`` field for the first (and only)
    candidate, capped at 80 newly generated tokens. Note the returned string
    includes the original prompt, as is standard for text-generation pipelines.
    """
    outputs = pipe(texto, max_new_tokens=80)
    first_candidate = outputs[0]
    return first_candidate["generated_text"]
# Minimal Gradio UI: one text box in, one text box out, wired to gerar().
iface = gr.Interface(fn=gerar, inputs="text", outputs="text", title="Exemplo simples com Qwen")

# Start the web server (blocks; Spaces executes this file as the entry point).
iface.launch()