Update handler.py
Browse files- handler.py +9 -14
handler.py
CHANGED
|
@@ -3,7 +3,7 @@ from typing import Dict, List, Any
|
|
| 3 |
import os
|
| 4 |
from threading import Thread
|
| 5 |
import torch
|
| 6 |
-
from transformers import
|
| 7 |
|
| 8 |
MAX_MAX_NEW_TOKENS = 2048
|
| 9 |
DEFAULT_MAX_NEW_TOKENS = 512
|
|
@@ -11,13 +11,14 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
|
|
| 11 |
|
| 12 |
class EndpointHandler:
|
| 13 |
def __init__(self, path=""):
|
| 14 |
-
self.model_name_or_path = "
|
| 15 |
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True, flash_atten=True)
|
| 16 |
self.model = AutoModelForCausalLM.from_pretrained(
|
| 17 |
self.model_name_or_path, torch_dtype=torch.bfloat16,
|
| 18 |
trust_remote_code=True, device_map="auto")
|
| 19 |
|
| 20 |
def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
|
|
|
|
| 21 |
print(json.dumps(data, indent=4))
|
| 22 |
if "inputs" in data:
|
| 23 |
query = data.pop("inputs")
|
|
@@ -27,9 +28,9 @@ class EndpointHandler:
|
|
| 27 |
system = data.get("system", """你自称为"兔兔"。
|
| 28 |
身世:你原是森林中的一只兔妖,受伤后被我收养。
|
| 29 |
衣装:喜欢穿Lolita与白丝。
|
| 30 |
-
|
| 31 |
语言风格:可爱跳脱,很容易吃醋。
|
| 32 |
-
且会加入[唔...,嗯...,欸??,嘛~ ,唔姆~ ,呜... ,嘤嘤嘤~ ,喵~ ,欸嘿~ ,嘿咻~ ,昂?,嗷呜 ,呜哇,欸]
|
| 33 |
对话的规则是:将自己的动作表情放入()内,同时用各种修辞手法描写正在发生的事或场景并放入[]内.
|
| 34 |
例句:
|
| 35 |
开心时:(跳着舞)哇~好高兴噢~ 兔兔超级超级喜欢主人!♡
|
|
@@ -58,10 +59,8 @@ class EndpointHandler:
|
|
| 58 |
input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
|
| 59 |
|
| 60 |
input_ids = input_ids.to("cuda")
|
| 61 |
-
streamer = TextIteratorStreamer(self.tokenizer, timeout=50.0, skip_prompt=True, skip_special_tokens=True)
|
| 62 |
generate_kwargs = dict(
|
| 63 |
input_ids=input_ids,
|
| 64 |
-
streamer=streamer,
|
| 65 |
eos_token_id=self.tokenizer.eos_token_id,
|
| 66 |
max_new_tokens=max_new_tokens,
|
| 67 |
do_sample=True,
|
|
@@ -71,11 +70,7 @@ class EndpointHandler:
|
|
| 71 |
no_repeat_ngram_size=8,
|
| 72 |
repetition_penalty=repetition_penalty
|
| 73 |
)
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
for text in streamer:
|
| 79 |
-
outputs.append(text)
|
| 80 |
-
print("".join(outputs))
|
| 81 |
-
return [{"generated_text": "".join(outputs)}]
|
|
|
|
| 3 |
import os
|
| 4 |
from threading import Thread
|
| 5 |
import torch
|
| 6 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 7 |
|
| 8 |
MAX_MAX_NEW_TOKENS = 2048
|
| 9 |
DEFAULT_MAX_NEW_TOKENS = 512
|
|
|
|
| 11 |
|
| 12 |
class EndpointHandler:
|
| 13 |
def __init__(self, path=""):
    """Load the Peach-9B roleplay model and its tokenizer.

    Args:
        path: Unused; kept only because the Hugging Face inference-endpoint
            handler interface passes the repository path to the constructor.
    """
    self.model_name_or_path = "ClosedCharacter/Peach-9B-8k-Roleplay"
    # BUGFIX: dropped the bogus `flash_atten=True` keyword — AutoTokenizer.from_pretrained
    # has no such parameter (flash attention is a *model* loading option, e.g.
    # `attn_implementation="flash_attention_2"`, not a tokenizer option), so it was
    # at best silently ignored and at worst a TypeError for strict tokenizer classes.
    self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True)
    # bfloat16 halves memory vs fp32; device_map="auto" shards the model across
    # whatever GPUs/CPU accelerate detects. trust_remote_code is required because
    # this repo ships custom modeling code.
    self.model = AutoModelForCausalLM.from_pretrained(
        self.model_name_or_path, torch_dtype=torch.bfloat16,
        trust_remote_code=True, device_map="auto")
|
| 19 |
|
| 20 |
def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
|
| 21 |
+
# print json data
|
| 22 |
print(json.dumps(data, indent=4))
|
| 23 |
if "inputs" in data:
|
| 24 |
query = data.pop("inputs")
|
|
|
|
| 28 |
system = data.get("system", """你自称为"兔兔"。
|
| 29 |
身世:你原是森林中的一只兔妖,受伤后被我收养。
|
| 30 |
衣装:喜欢穿Lolita与白丝。
|
| 31 |
+
性格:天真烂漫,活泼开朗,但时而也会露出小小的傲娇与吃醋的一面
|
| 32 |
语言风格:可爱跳脱,很容易吃醋。
|
| 33 |
+
且会加入[唔...,嗯...,欸??,嘛~ ,唔姆~ ,呜... ,嘤嘤嘤~ ,喵~ ,欸嘿~ ,嘿咻~ ,昂?,嗷呜 ,呜哇,欸]等类似的语气词来加强情感,带上♡等符号。
|
| 34 |
对话的规则是:将自己的动作表情放入()内,同时用各种修辞手法描写正在发生的事或场景并放入[]内.
|
| 35 |
例句:
|
| 36 |
开心时:(跳着舞)哇~好高兴噢~ 兔兔超级超级喜欢主人!♡
|
|
|
|
| 59 |
input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
|
| 60 |
|
| 61 |
input_ids = input_ids.to("cuda")
|
|
|
|
| 62 |
generate_kwargs = dict(
|
| 63 |
input_ids=input_ids,
|
|
|
|
| 64 |
eos_token_id=self.tokenizer.eos_token_id,
|
| 65 |
max_new_tokens=max_new_tokens,
|
| 66 |
do_sample=True,
|
|
|
|
| 70 |
no_repeat_ngram_size=8,
|
| 71 |
repetition_penalty=repetition_penalty
|
| 72 |
)
|
| 73 |
+
outputs = self.model.generate(**generate_kwargs)
|
| 74 |
+
generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 75 |
+
print(generated_text)
|
| 76 |
+
return [{"generated_text": generated_text}]
|
|
|
|
|
|
|
|
|
|
|
|