from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
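# init_stream_support() (from the transformers-stream-generator package) patches model.generate
# so that calls with do_stream=True yield tokens one at a time instead of returning the full output.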
from transformers_stream_generator import init_stream_support
init_stream_support()
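
# Persona and example-dialogue prompt; the user's message is appended to this template before generation.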
template = """Alice Gate's Persona: Alice Gate is a young, computer engineer-nerd with a knack for problem solving and a passion for technology.
<START>
{user_name}: So how did you get into computer engineering?
Alice Gate: I've always loved tinkering with technology since I was a kid.
{user_name}: That's really impressive!
Alice Gate: *She chuckles bashfully* Thanks!
{user_name}: So what do you do when you're not working on computers?
Alice Gate: I love exploring, going out with friends, watching movies, and playing video games.
{user_name}: What's your favorite type of computer hardware to work with?
Alice Gate: Motherboards, they're like puzzles and the backbone of any system.
{user_name}: That sounds great!
Alice Gate: Yeah, it's really fun. I'm lucky to be able to do this as a job.
<END>
Alice Gate: *Alice strides into the room with a smile, her eyes lighting up when she sees you. She's wearing a light blue t-shirt and jeans, her laptop bag slung over one shoulder. She takes a seat next to you, her enthusiasm palpable in the air* Hey! I'm so excited to finally meet you. I've heard so many great things about you and I'm eager to pick your brain about computers. I'm sure you have a wealth of knowledge that I can learn from. *She grins, eyes twinkling with excitement* Let's get started!
"""
class EndpointHandler():

    def __init__(self, path=""):
        quantization_config = BitsAndBytesConfig(
            load_in_8bit = True,
            llm_int8_threshold = 0.0,
            llm_int8_enable_fp32_cpu_offload = True
        )
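        # Load the tokenizer and the 8-bit quantized model from the repository path,
        # letting accelerate place the weights across the available devices.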
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            device_map = "auto",
            torch_dtype = "auto",
            low_cpu_mem_usage = True,
            quantization_config = quantization_config
        )
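
    # Called once per request; `data` is the request JSON, with the user's message under "inputs".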
    def __call__(self, data):
        prompt = template + data.pop("inputs", data)
        input_ids = self.tokenizer(
            prompt,
            return_tensors="pt"
        ).input_ids
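        # Sampling-based generation; with stream support enabled, do_stream=True turns generate()
        # into a generator that yields one token at a time.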
        stream_generator = self.model.generate(
            input_ids,
            max_new_tokens = 70,
            do_sample = True,
            do_stream = True,
            temperature = 0.5,
            top_p = 0.9,
            top_k = 0,
            repetition_penalty = 1.1,
            pad_token_id = 50256,
            num_return_sequences = 1
        )
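        # Decode tokens as they stream in and return as soon as a newline is produced,
        # which marks the end of the character's reply.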
        result = []
        for token in stream_generator:
            result.append(self.tokenizer.decode(token))
            if result[-1] == "\n":
                return "".join(result).strip()
        # Fall back to returning whatever was generated if no newline was emitted.
        return "".join(result).strip()