solon committed on
Commit
309d106
·
1 Parent(s): 74485bc

Load tokenizer from base model to fix compatibility

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -4,14 +4,15 @@ import torch.nn.functional as F
4
  from transformers import AutoTokenizer, AutoModelForMaskedLM
5
 
6
  # 설정
7
- MODEL_ID = "solonsophy/kf-deberta-gen" # HuggingFace 모델 repo에서 로드
 
8
  MAX_LEN = 256
9
  Q_MAX_LEN = 100
10
 
11
  # ๋ชจ๋ธ ๋กœ๋“œ
12
  print("🔄 Loading model...")
13
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
14
- model = AutoModelForMaskedLM.from_pretrained(MODEL_ID)
15
  device = "cuda" if torch.cuda.is_available() else "cpu"
16
  model = model.to(device)
17
  model.eval()
 
4
  from transformers import AutoTokenizer, AutoModelForMaskedLM
5
 
6
  # 설정
7
+ MODEL_ID = "solonsophy/kf-deberta-gen" # 파인튜닝된 모델
8
+ BASE_MODEL_ID = "kakaobank/kf-deberta-base" # 기반 모델 (토크나이저용)
9
  MAX_LEN = 256
10
  Q_MAX_LEN = 100
11
 
12
  # ๋ชจ๋ธ ๋กœ๋“œ
13
  print("🔄 Loading model...")
14
+ tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID) # 기반 모델에서 토크나이저 로드
15
+ model = AutoModelForMaskedLM.from_pretrained(MODEL_ID) # 파인튜닝된 가중치 로드
16
  device = "cuda" if torch.cuda.is_available() else "cpu"
17
  model = model.to(device)
18
  model.eval()