from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Hub repo id (or a local path) of the checkpoint to load.
model_path = "tosei0000/code"

# Load the tokenizer and model. trust_remote_code=True allows any custom
# model/tokenizer code shipped with the checkpoint to be executed.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)

# Run on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

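# Optional: on a GPU with limited memory, the model could instead be loaded
# in half precision with automatic weight placement. This is a sketch, not
# part of the original example; device_map="auto" requires the `accelerate`
# package, and it replaces the explicit model.to(device) call above:
#
# model = AutoModelForCausalLM.from_pretrained(
#     model_path,
#     trust_remote_code=True,
#     torch_dtype=torch.float16,  # roughly halves memory vs. float32
#     device_map="auto",          # let accelerate place the weights
# )
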
def chat(prompt, max_new_tokens=100):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Note: the returned sequence includes the prompt tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


response = chat("Hello, please introduce yourself.")
print(response)
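
# Optional variant: generate() decodes greedily unless the checkpoint's
# generation config enables sampling. The sketch below passes standard
# transformers sampling arguments for more varied output; the specific
# values (temperature=0.7, top_p=0.9) are illustrative assumptions,
# not tuned for this model.
def chat_sampled(prompt, max_new_tokens=100):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=True,    # sample instead of greedy decoding
        temperature=0.7,   # < 1.0 sharpens the token distribution
        top_p=0.9,         # nucleus sampling: keep the top 90% probability mass
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(chat_sampled("Write a Python function that reverses a string."))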
|
|