Hugging Face Space (status: Sleeping)
Commit 86c168d · Parent(s): 8d07d2f
Message: fix: remove device_map
Changed file: app.py
@@ -8,15 +8,11 @@ from transformers import (
 import torch
 
 chat_model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
-chat_model = AutoModelForCausalLM.from_pretrained(
-    chat_model_name, torch_dtype=torch.bfloat16, device_map="auto"
-)
+chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, dtype=torch.bfloat16)
 chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
 
 moderator_model_name = "saiteki-kai/QA-DeBERTa-v3-large"
-moderator_model = AutoModelForSequenceClassification.from_pretrained(
-    moderator_model_name, device_map="auto"
-)
+moderator_model = AutoModelForSequenceClassification.from_pretrained(moderator_model_name)
 moderator_tokenizer = AutoTokenizer.from_pretrained(moderator_model_name)
 
 