# Qwen2.5-GenX-7B / app.py
# (Hugging Face Space page residue, preserved as comments so the file parses:)
# sharp8's picture
# Update app.py
# eec43e1 verified
# raw
# history blame contribute delete
# 808 Bytes
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Hugging Face Hub identifier of the chat model this app serves.
MODEL_ID = "INTERX/Qwen2.5-GenX-7B"
# NOTE(review): trust_remote_code=True executes Python shipped inside the model
# repo — acceptable only if the INTERX repo is trusted; confirm before deploying.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
# device_map="auto" shards/places weights on the available device(s);
# torch_dtype="auto" uses the dtype stored in the checkpoint (e.g. bf16).
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto", torch_dtype="auto", trust_remote_code=True)
def chat(user_input):
    """Generate a single-turn reply from the model for *user_input*.

    Args:
        user_input: The user's message as plain text.

    Returns:
        The model's reply decoded without special tokens. Only the newly
        generated tokens are decoded, so the prompt is not echoed back.
    """
    messages = [{"role": "user", "content": user_input}]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    output = model.generate(inputs, max_new_tokens=512)
    # BUG FIX: decoding output[0] in full would include the chat template and
    # the user's own prompt in the response. Slice off the prompt tokens and
    # decode only what the model generated.
    new_tokens = output[0][inputs.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
# Minimal Gradio UI: a single text box in, a single text box out.
demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Chat con Qwen2.5-GenX-7B",
)
demo.launch()