Update app.py
app.py CHANGED
@@ -1,28 +1,25 @@
 import torch
-
-from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
+model_id = "cody82/unitrip"  # or another model ID from Hugging Face
 
-
-
-
-
-
-    local_files_only=False,
-    torch_dtype=torch.float32,
-    device_map="auto" if torch.cuda.is_available() else None,
-)
+tokenizer = AutoTokenizer.from_pretrained(model_id)  # without local_files_only
+model = AutoModelForCausalLM.from_pretrained(model_id)  # without local_files_only
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model.to(device)
 
 system_message = "Ты — умный помощник по Университету Иннополис."
 
-
-
-
+while True:
+    user_input = input("🧑 Вопрос: ").strip()
+    if user_input.lower() in ["exit", "выход", "quit"]:
+        break
+
+    prompt = f"{system_message}\nUser: {user_input}\nAssistant:"
 
-
+    inputs = tokenizer(prompt, return_tensors="pt").to(device)
 
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     with torch.no_grad():
         outputs = model.generate(
             **inputs,
@@ -33,11 +30,6 @@ def respond(message, history=None):
             use_cache=True,
         )
 
-
-    answer = tokenizer.decode(
-
-    history.append((message, answer))
-    return history
-
-chat = gr.ChatInterface(fn=respond, title="Innopolis Assistant")
-chat.launch()
+    generated = outputs[0][inputs["input_ids"].shape[1]:]
+    answer = tokenizer.decode(generated, skip_special_tokens=True)
+    print(f"🤖 Ответ: {answer.strip()}\n")
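Pieced together from the added lines, the new app.py reads roughly as below. This is a sketch, not the committed file: the diff hides the model.generate() arguments on new lines 26-29 (unchanged context between **inputs, and use_cache=True,), so the generation parameters shown there (max_new_tokens, do_sample, temperature, top_p) are assumptions.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "cody82/unitrip"  # or another model ID from Hugging Face

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# "You are a smart assistant for Innopolis University."
system_message = "Ты — умный помощник по Университету Иннополис."

while True:
    user_input = input("🧑 Вопрос: ").strip()  # "Question:"
    if user_input.lower() in ["exit", "выход", "quit"]:
        break

    prompt = f"{system_message}\nUser: {user_input}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # The lines between **inputs, and use_cache=True, are elided by
            # the diff (unchanged context); these values are assumptions:
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            use_cache=True,
        )

    # Decode only the newly generated tokens, skipping the echoed prompt.
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    answer = tokenizer.decode(generated, skip_special_tokens=True)
    print(f"🤖 Ответ: {answer.strip()}\n")  # "Answer:"

Run it with python app.py; typing exit, выход ("exit"), or quit ends the loop. Slicing outputs[0] from inputs["input_ids"].shape[1] onward decodes only the model's continuation, which is why the prompt is no longer printed back to the user.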