Commit "change model path" — file handler.py modified (+6 insertions, −4 deletions).
|
@@ -10,10 +10,11 @@ from transformers import (
|
|
| 10 |
class EndpointHandler():
|
| 11 |
def __init__(self, path=""):
|
| 12 |
# Load model from HuggingFace Hub
|
|
|
|
| 13 |
config = BartConfig.from_pretrained("hyunwoongko/kobart")
|
| 14 |
self.model = BartForConditionalGeneration(config).eval().to('cpu')
|
| 15 |
self.model.model.load_state_dict(torch.load(
|
| 16 |
-
|
| 17 |
map_location='cpu',
|
| 18 |
))
|
| 19 |
self.tokenizer = PreTrainedTokenizerFast.from_pretrained("hyunwoongko/kobart")
|
|
@@ -43,7 +44,7 @@ class EndpointHandler():
|
|
| 43 |
else:
|
| 44 |
texts = dataPop
|
| 45 |
|
| 46 |
-
tokenized = tokenize(tokenizer, texts)
|
| 47 |
input_ids = tokenized["input_ids"]
|
| 48 |
attention_mask = tokenized["attention_mask"]
|
| 49 |
|
|
@@ -92,6 +93,7 @@ class EndpointHandler():
|
|
| 92 |
return {"summarization": summ_result}
|
| 93 |
|
| 94 |
def tokenize(
|
|
|
|
| 95 |
tokenizer,
|
| 96 |
texts: List[str],
|
| 97 |
max_len: int = 1024,
|
|
@@ -114,9 +116,9 @@ class EndpointHandler():
|
|
| 114 |
# result + <eos>
|
| 115 |
)
|
| 116 |
|
| 117 |
-
return add_bos_eos_tokens(tokenizer, tokens, eos_list)
|
| 118 |
|
| 119 |
-
def add_bos_eos_tokens(tokenizer, tokens, eos_list):
|
| 120 |
input_ids = tokens["input_ids"]
|
| 121 |
attention_mask = tokens["attention_mask"]
|
| 122 |
token_added_ids, token_added_masks = [], []
|
|
|
|
| 10 |
class EndpointHandler():
|
| 11 |
def __init__(self, path=""):
    """Load the fine-tuned KoBART summarization model and its tokenizer.

    Parameters
    ----------
    path : str
        Directory containing the fine-tuned checkpoint file
        ``kobartbasekosummary.pt`` (HF Inference Endpoints passes the
        repository root here).
    """
    import os  # local import: the file's top-level import block is not in view

    # Fix: the original built the path as ``path + "/" + filename``, which
    # produced the absolute path "/kobartbasekosummary.pt" whenever ``path``
    # was left at its default "". os.path.join handles both cases correctly.
    self.model_path = os.path.join(path, "kobartbasekosummary.pt")

    # Architecture/config come from the public KoBART checkpoint; only the
    # fine-tuned weights are read from ``model_path``. Inference-only: eval
    # mode, pinned to CPU.
    config = BartConfig.from_pretrained("hyunwoongko/kobart")
    self.model = BartForConditionalGeneration(config).eval().to('cpu')
    # NOTE(review): the state dict is loaded into the inner ``model.model``
    # (the bare BartModel), not the full conditional-generation wrapper —
    # presumably the LM head is weight-tied to the shared embeddings, so this
    # is sufficient; confirm the checkpoint's key layout matches.
    self.model.model.load_state_dict(torch.load(
        self.model_path,
        map_location='cpu',
    ))
    self.tokenizer = PreTrainedTokenizerFast.from_pretrained("hyunwoongko/kobart")
|
|
|
|
| 44 |
else:
|
| 45 |
texts = dataPop
|
| 46 |
|
| 47 |
+
tokenized = self.tokenize(tokenizer, texts)
|
| 48 |
input_ids = tokenized["input_ids"]
|
| 49 |
attention_mask = tokenized["attention_mask"]
|
| 50 |
|
|
|
|
| 93 |
return {"summarization": summ_result}
|
| 94 |
|
| 95 |
def tokenize(
|
| 96 |
+
self,
|
| 97 |
tokenizer,
|
| 98 |
texts: List[str],
|
| 99 |
max_len: int = 1024,
|
|
|
|
| 116 |
# result + <eos>
|
| 117 |
)
|
| 118 |
|
| 119 |
+
return self.add_bos_eos_tokens(tokenizer, tokens, eos_list)
|
| 120 |
|
| 121 |
+
def add_bos_eos_tokens(self, tokenizer, tokens, eos_list):
|
| 122 |
input_ids = tokens["input_ids"]
|
| 123 |
attention_mask = tokens["attention_mask"]
|
| 124 |
token_added_ids, token_added_masks = [], []
|