from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the Qwen model and tokenizer.
# Qwen checkpoints ship custom modeling code, so trust_remote_code=True is required.
model_name = "Qwen/Qwen-7B"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

# Move the model to GPU if one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()

def chat(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        # max_new_tokens bounds only the generated continuation;
        # max_length would also count the prompt tokens.
        outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Example
prompt = "Hello, how are you?"
print(chat(prompt))
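
# Note: Qwen/Qwen-7B is the base completion model, so it continues text rather
# than holding a conversation. For dialogue, the chat-tuned checkpoint
# Qwen/Qwen-7B-Chat exposes a chat() helper through its remote code that
# applies the conversation template and tracks history. A minimal sketch,
# following the usage shown on the Qwen-7B-Chat model card:
from transformers import AutoModelForCausalLM, AutoTokenizer

chat_model_name = "Qwen/Qwen-7B-Chat"
chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name, trust_remote_code=True)
chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, trust_remote_code=True)
chat_model.eval()

# chat() returns the reply plus the updated history, which can be passed back
# in to continue the same conversation.
response, history = chat_model.chat(chat_tokenizer, "Hello, how are you?", history=None)
print(response)
response, history = chat_model.chat(chat_tokenizer, "Tell me a joke.", history=history)
print(response)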