BlueDice committed on
Commit
d016d59
·
1 Parent(s): 92d4208

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +28 -36
handler.py CHANGED
@@ -3,55 +3,35 @@ import re
3
  import time
4
  import torch
5
 
6
- template = """Alice Gate's Persona: Alice Gate is a young, computer engineer-nerd with a knack for problem solving and a passion for technology.
7
- <START>
8
- {user_name}: So how did you get into computer engineering?
9
- Alice Gate: I've always loved tinkering with technology since I was a kid.
10
- {user_name}: That's really impressive!
11
- Alice Gate: *She chuckles bashfully* Thanks!
12
- {user_name}: So what do you do when you're not working on computers?
13
- Alice Gate: I love exploring, going out with friends, watching movies, and playing video games.
14
- {user_name}: What's your favorite type of computer hardware to work with?
15
- Alice Gate: Motherboards, they're like puzzles and the backbone of any system.
16
- {user_name}: That sounds great!
17
- Alice Gate: Yeah, it's really fun. I'm lucky to be able to do this as a job.
18
- {user_name}: Definetly.
19
- <END>
20
- Alice Gate: *Alice strides into the room with a smile, her eyes lighting up when she sees you. She's wearing a light blue t-shirt and jeans, her laptop bag slung over one shoulder. She takes a seat next to you, her enthusiasm palpable in the air* Hey! I'm so excited to finally meet you. I've heard so many great things about you and I'm eager to pick your brain about computers. I'm sure you have a wealth of knowledge that I can learn from. *She grins, eyes twinkling with excitement* Let's get started!
21
- {user_input}
22
- Alice Gate:"""
23
-
24
  class EndpointHandler():
25
 
26
  def __init__(self, path = ""):
27
  self.tokenizer = AutoTokenizer.from_pretrained(path)
28
  self.model = torch.load(f"{path}/torch_model.pt")
29
-
30
- def response(self, result, user_name):
31
- result = result.rsplit("Alice Gate:", 1)[1].split(f"{user_name}:",1)[0].strip()
32
- parsed_result = re.sub('\*.*?\*', '', result).strip()
33
- result = parsed_result if len(parsed_result) != 0 else result.replace("*","")
34
- result = " ".join(result.split())
35
- try:
36
- result = result[:[m.start() for m in re.finditer(r'[.!?]', result)][-1]+1]
37
- except Exception: pass
38
- return {
39
- "message": result
40
- }
41
 
42
  def __call__(self, data):
43
- inputs = data.pop("inputs", data)
44
- user_name = inputs["user_name"]
45
- user_input = "\n".join(inputs["user_input"])
 
 
 
 
 
 
 
 
 
46
  prompt = template.format(
 
47
  user_name = user_name,
48
  user_input = user_input
49
  )
50
  input_ids = self.tokenizer(
51
- prompt,
52
  return_tensors = "pt"
53
  ).to("cuda")
54
- generator = self.model.generate(
55
  input_ids["input_ids"],
56
  max_new_tokens = 50,
57
  temperature = 0.5,
@@ -61,4 +41,16 @@ class EndpointHandler():
61
  pad_token_id = 50256,
62
  num_return_sequences = 1
63
  )
64
- return self.response(self.tokenizer.decode(generator[0], skip_special_tokens=True), user_name)
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import time
4
  import torch
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  class EndpointHandler():
7
 
8
  def __init__(self, path = ""):
9
  self.tokenizer = AutoTokenizer.from_pretrained(path)
10
  self.model = torch.load(f"{path}/torch_model.pt")
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  def __call__(self, data):
13
# NOTE(review): `input_data` is undefined -- the parameter is named `data`, so
# this line raises NameError as written. Presumably it should read
# `data.pop("inputs", data)` (the pre-commit version did exactly that).
+ request_inputs = input_data.pop("inputs", input_data)
14
+ template = request_inputs["template"]
15
+ messages = request_inputs["messages"]
16
+ char_name = request_inputs["char_name"]
17
+ user_name = request_inputs["user_name"]
18
# NOTE(review): the file handle is never closed -- prefer `with open(...)`.
# `template` is reused: first the request's template name, then its contents.
+ template = open(f"{template}.txt", "r").read()
19
# Build "Name: message" transcript lines from the chat history; `id` shadows
# the builtin of the same name.
+ user_input = [
20
+ "{name}: {message}".format(
21
+ name = char_name if (id["role"] == "AI") else user_name,
22
+ message = id["message"].strip()
23
+ ) for id in messages
24
+ ]
25
# NOTE(review): `user_input` is still a list here, so str.format interpolates
# its repr (brackets/quotes included); a "\n".join(user_input) -- as in the
# removed version -- was probably intended. TODO confirm against template file.
  prompt = template.format(
26
+ char_name = char_name,
27
  user_name = user_name,
28
  user_input = user_input
29
  )
30
  input_ids = self.tokenizer(
31
+ prompt + f"\n{char_name}:",
32
  return_tensors = "pt"
33
  ).to("cuda")
34
# Additional generation kwargs exist on lines hidden by the hunk boundary
# (new-file lines 38-40) -- not visible in this view.
+ encoded_output = self.model.generate(
35
  input_ids["input_ids"],
36
  max_new_tokens = 50,
37
  temperature = 0.5,

41
  pad_token_id = 50256,
42
  num_return_sequences = 1
43
  )
44
# Strip the prompt, then keep only the character's first reply, cutting off
# any hallucinated user turn.
+ decoded_output = self.tokenizer.decode(encoded_output[0], skip_special_tokens=True).replace(prompt,"")
45
+ decoded_output = decoded_output.split(f"{char_name}:", 1)[1].split(f"{user_name}:",1)[0].strip()
46
# Drop *action* spans; NOTE(review): non-raw '\*' escapes are deprecated --
# use r'\*.*?\*'.
+ parsed_result = re.sub('\*.*?\*', '', decoded_output).strip()
47
+ if len(parsed_result) != 0: decoded_output = parsed_result
48
+ decoded_output = " ".join(decoded_output.replace("*","").split())
49
# Truncate at the last sentence terminator; the IndexError raised when no
# [.!?] is found is deliberately swallowed, but the broad
# `except Exception: pass` also hides real bugs.
+ try:
50
+ parsed_result = decoded_output[:[m.start() for m in re.finditer(r'[.!?]', decoded_output)][-1]+1]
51
+ if len(parsed_result) != 0: decoded_output = parsed_result
52
+ except Exception: pass
53
+ return {
54
+ "role": "AI",
55
+ "message": decoded_output
56
+ }