```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Free model choice (Mistral 7B)
model_name = "mistralai/Mistral-7B-Instruct-v0.1"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="auto"
)

def chat(user_input):
    # model.device points at the device chosen by device_map="auto"
    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    # max_new_tokens bounds the reply length; max_length would also count the prompt
    outputs = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Test run
print(chat("Hello, how are you?"))
```
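Note that the test above feeds the raw string straight to the model, bypassing the `[INST] ... [/INST]` instruction format Mistral-7B-Instruct was tuned on. Below is a minimal sketch of applying that format via the tokenizer's built-in chat template; it assumes a recent transformers version (4.34+, where `apply_chat_template` is available), and the helper name `chat_templated` is purely illustrative:

```python
# Sketch: wrap the prompt in Mistral's [INST] ... [/INST] chat format.
# Assumes transformers >= 4.34; `chat_templated` is an illustrative name.
def chat_templated(user_input):
    messages = [{"role": "user", "content": user_input}]
    # apply_chat_template returns the formatted prompt as input ids
    input_ids = tokenizer.apply_chat_template(
        messages, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=200)
    # Decode only the newly generated tokens, not the echoed prompt
    return tokenizer.decode(
        outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
    )

print(chat_templated("Hello, how are you?"))
```

Responses from instruct-tuned checkpoints are generally more coherent when the expected chat format is applied, so this variant is worth preferring for conversational use.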