from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the Qwen model and tokenizer. Qwen/Qwen-7B ships custom modeling code
# on the Hugging Face Hub, so trust_remote_code=True is required to load it.
model_name = "Qwen/Qwen-7B"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
def chat(prompt):
    # Tokenize the prompt and generate a continuation (greedy decoding by default).
    # max_new_tokens bounds only the generated tokens, independent of prompt length.
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # no gradients needed for inference
        outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Example
prompt = "Hello, how are you?"
print(chat(prompt))
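
# Optional: a minimal sketch of sampling-based generation, reusing the model
# and tokenizer loaded above. do_sample, temperature, and top_p are standard
# transformers generate() arguments; the values here are illustrative, not tuned.
def chat_sampled(prompt, temperature=0.7, top_p=0.9):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,  # sample from the distribution instead of greedy decoding
            temperature=temperature,
            top_p=top_p,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(chat_sampled("Tell me a short story."))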