Devops-hestabit committed
Commit 80fbae9
1 Parent(s): 9c99e3b

Upload 2 files

Files changed (2):
  1. code/inference.py +65 -0
  2. code/requirements.txt +1 -0
code/inference.py ADDED
@@ -0,0 +1,76 @@
+ from transformers import AutoTokenizer
+ import re
+ import torch
+
+ def model_fn(model_dir):
+     # Load the tokenizer, serialized model, and default prompt template
+     tokenizer = AutoTokenizer.from_pretrained(model_dir)
+     model = torch.load(f"{model_dir}/torch_model.pt")
+     with open(f"{model_dir}/default_template.txt", "r") as f:
+         template = f.read()
+     return model, tokenizer, template
+
+ def predict_fn(data, load_list):
+     # Unpack the model, tokenizer, and template returned by model_fn
+     model, tokenizer, template = load_list
+
+     # Parse the request into the pieces needed to build the model input
+     request_inputs = data.pop("inputs", data)
+     messages = request_inputs["messages"]
+     char_name = request_inputs["char_name"]
+     user_name = request_inputs["user_name"]
+     chats_curled = request_inputs["chats_curled"]
+     user_input = [
+         "{name}: {message}".format(
+             name = char_name if (msg["role"] == "AI") else user_name,
+             message = msg["message"].strip()
+         ) for msg in messages
+     ]
+
+     # Tokenize the prompt, curling (dropping) the oldest chat turns until
+     # it fits within the 2048-token context window
+     while True:
+         prompt = template.format(char_name = char_name, user_name = user_name, user_input = "\n".join(user_input))
+         input_ids = tokenizer(prompt + f"\n{char_name}:", return_tensors = "pt").to("cuda")
+         if input_ids.input_ids.size(1) > 2048:
+             chats_curled += 1
+             user_input = user_input[chats_curled * 2:]
+         else:
+             break
+
+     # Generate a reply; do_sample=True is required for temperature, top_p,
+     # and top_k to actually take effect
+     encoded_output = model.generate(
+         input_ids["input_ids"],
+         do_sample = True,
+         max_new_tokens = 50,
+         temperature = 0.5,
+         top_p = 0.9,
+         top_k = 0,
+         repetition_penalty = 1.1,
+         pad_token_id = 50256,
+         num_return_sequences = 1
+     )
+     decoded_output = tokenizer.decode(encoded_output[0], skip_special_tokens=True).replace(prompt, "")
+
+     # Keep only the character's reply: take the text after the character tag,
+     # cut at the next user tag, and strip any *action* markup
+     decoded_output = decoded_output.split(f"{char_name}:", 1)[1].split(f"{user_name}:", 1)[0].strip()
+     parsed_result = re.sub(r'\*.*?\*', '', decoded_output).strip()
+     if len(parsed_result) != 0:
+         decoded_output = parsed_result
+     decoded_output = " ".join(decoded_output.replace("*", "").split())
+
+     # Truncate to the last complete sentence, if any end punctuation exists
+     try:
+         parsed_result = decoded_output[:[m.start() for m in re.finditer(r'[.!?]', decoded_output)][-1] + 1]
+         if len(parsed_result) != 0:
+             decoded_output = parsed_result
+     except Exception:
+         pass
+
+     return {
+         "role": "AI",
+         "message": decoded_output,
+         "chats_curled": chats_curled
+     }
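
For reference, the request body that predict_fn expects and the response it returns can be reconstructed from the parsing logic above. The sketch below is illustrative only: the names are hypothetical, and the template lines are merely a guess at the shape of default_template.txt (the real file ships inside the model archive, not in this commit), showing the {char_name}, {user_name}, and {user_input} placeholders that template.format() requires.

    # Hypothetical request payload, shaped to match predict_fn's parsing
    payload = {
        "inputs": {
            "char_name": "Aria",     # hypothetical character name
            "user_name": "Sam",      # hypothetical user name
            "chats_curled": 0,       # turn-pairs already trimmed in earlier calls
            "messages": [
                {"role": "user", "message": "Hi there!"},
                {"role": "AI",   "message": "Hello, Sam."},
            ],
        }
    }

    # The handler responds with a dict of the form:
    # {"role": "AI", "message": "<generated reply>", "chats_curled": 0}

    # And default_template.txt might look something like (illustration only):
    # This is a conversation between {char_name} and {user_name}.
    # {user_input}
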
code/requirements.txt ADDED
@@ -0,0 +1 @@
+ accelerate==0.18.0
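
Because model_fn and predict_fn follow the SageMaker Hugging Face inference toolkit's handler convention, a model.tar.gz containing torch_model.pt, default_template.txt, the tokenizer files, and this code/ directory can be deployed roughly as follows. This is a minimal sketch, not part of the commit: the S3 path, IAM role, instance type, and framework versions are all assumptions (the versions are guessed to be era-appropriate for accelerate 0.18.0).

    import sagemaker
    from sagemaker.huggingface import HuggingFaceModel

    huggingface_model = HuggingFaceModel(
        model_data="s3://my-bucket/model.tar.gz",  # assumption: your artifact path
        role=sagemaker.get_execution_role(),
        transformers_version="4.26",               # assumption: era-appropriate versions
        pytorch_version="1.13",
        py_version="py39",
    )

    predictor = huggingface_model.deploy(
        initial_instance_count=1,
        instance_type="ml.g4dn.xlarge",            # assumption: any single-GPU instance
    )

    # Invoke with a payload shaped like the example after inference.py above
    response = predictor.predict(payload)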