dzezzefezfz committed
Commit 5e4a786 · verified · 1 Parent(s): 5463d07

Update backend_models.py

Files changed (1)
  1. backend_models.py +72 -0
backend_models.py CHANGED
@@ -0,0 +1,72 @@
+ import os
+ from typing import Iterator, List, Tuple
+
+ def get_default_local_model() -> str:
+     return os.getenv("LOCAL_MODEL", "TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+
+ class LocalHFBackend:
+     def __init__(self, model_name: str):
+         self.model_name = model_name
+         # Deferred imports keep the module importable when torch/transformers are absent.
+         from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+         import torch
+
+         self.torch = torch
+         self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+         self.model = AutoModelForCausalLM.from_pretrained(
+             self.model_name,
+             torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+             device_map="auto" if torch.cuda.is_available() else None,
+         )
+         self.streamer_cls = TextIteratorStreamer
+
+     def _build_prompt(self, system_prompt: str, history: List[Tuple[str, str]], user_msg: str) -> str:
+         # Zephyr/TinyLlama chat format: <|system|>/<|user|>/<|assistant|> tags, each turn closed by </s>.
+         parts = [f"<|system|>\n{system_prompt}\n</s>"]
+         for u, a in history:
+             if u:
+                 parts.append(f"<|user|>\n{u}\n</s>")
+             if a:
+                 parts.append(f"<|assistant|>\n{a}\n</s>")
+         parts.append(f"<|user|>\n{user_msg}\n</s>\n<|assistant|>\n")
+         return "".join(parts)
+
+     def generate_stream(
+         self,
+         system_prompt: str,
+         history: List[Tuple[str, str]],
+         user_msg: str,
+         temperature: float,
+         max_new_tokens: int,
+     ) -> Iterator[str]:
+         from threading import Thread
+         from transformers import StoppingCriteria, StoppingCriteriaList
+
+         prompt = self._build_prompt(system_prompt, history, user_msg)
+         inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
+         tokenizer = self.tokenizer  # captured here: inside the nested class, `self` is the criteria object
+
+         class StopOnAssistantTag(StoppingCriteria):
+             # Halt generation if the model starts fabricating a new user turn.
+             def __call__(self, input_ids, scores, **kwargs):
+                 text = tokenizer.decode(input_ids[0].tolist()[-20:])
+                 return "</s><|user|>" in text
+
+         streamer = self.streamer_cls(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
+         gen_kwargs = dict(
+             **inputs,
+             streamer=streamer,
+             max_new_tokens=max_new_tokens,
+             temperature=temperature,
+             do_sample=temperature > 0,
+             eos_token_id=self.tokenizer.eos_token_id,
+             stopping_criteria=StoppingCriteriaList([StopOnAssistantTag()]),
+         )
+         # generate() runs on a background thread; the streamer yields decoded text as it arrives.
+         thread = Thread(target=self.model.generate, kwargs=gen_kwargs)
+         thread.start()
+
+         buf = []
+         for token in streamer:
+             buf.append(token)
+             yield "".join(buf)  # yield the full reply accumulated so far
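
For reference, a minimal usage sketch. This is hypothetical driver code, not part of this commit, and assumes the file is importable as backend_models:

    from backend_models import LocalHFBackend, get_default_local_model

    backend = LocalHFBackend(get_default_local_model())
    history = []  # (user, assistant) pairs from earlier turns
    reply = ""
    for partial in backend.generate_stream(
        system_prompt="You are a helpful assistant.",
        history=history,
        user_msg="Explain streaming generation in one sentence.",
        temperature=0.7,
        max_new_tokens=128,
    ):
        reply = partial  # each yield is the full reply so far
    print(reply)

Because generate_stream yields the accumulated text rather than token deltas, a chat UI can simply replace its displayed message with each yielded string, the pattern front ends such as Gradio expect.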