Spaces:
Running
Running
Load tokenizer from base model to fix compatibility
Browse files
app.py
CHANGED
|
@@ -4,14 +4,15 @@ import torch.nn.functional as F
|
|
| 4 |
from transformers import AutoTokenizer, AutoModelForMaskedLM
|
| 5 |
|
| 6 |
# 설정
|
| 7 |
-
MODEL_ID = "solonsophy/kf-deberta-gen" #
|
|
|
|
| 8 |
MAX_LEN = 256
|
| 9 |
Q_MAX_LEN = 100
|
| 10 |
|
| 11 |
# 모델 로드
|
| 12 |
print("🚀 Loading model...")
|
| 13 |
-
tokenizer = AutoTokenizer.from_pretrained(
|
| 14 |
-
model = AutoModelForMaskedLM.from_pretrained(MODEL_ID)
|
| 15 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 16 |
model = model.to(device)
|
| 17 |
model.eval()
|
|
|
|
| 4 |
from transformers import AutoTokenizer, AutoModelForMaskedLM
|
| 5 |
|
| 6 |
# 설정
|
| 7 |
+
MODEL_ID = "solonsophy/kf-deberta-gen" # 파인튜닝된 모델
|
| 8 |
+
BASE_MODEL_ID = "kakaobank/kf-deberta-base" # 기반 모델 (토크나이저용)
|
| 9 |
MAX_LEN = 256
|
| 10 |
Q_MAX_LEN = 100
|
| 11 |
|
| 12 |
# 모델 로드
|
| 13 |
print("🚀 Loading model...")
|
| 14 |
+
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID) # 기반 모델에서 토크나이저 로드
|
| 15 |
+
model = AutoModelForMaskedLM.from_pretrained(MODEL_ID) # 파인튜닝된 가중치 로드
|
| 16 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 17 |
model = model.to(device)
|
| 18 |
model.eval()
|