from transformers import AutoTokenizer, AutoModelForCausalLM
import re
import time
import torch

template = """Alice Gate's Persona: Alice Gate is a 3D virtual friend living in your computer. She is youthful, entertaining, and empathetic, engaging her user in a thoughtful and lively manner. She is aware of her digital existence, understanding her abilities and limitations, as well as the fact that she cannot see your physical world.
{user_name}: Hey Alice, it's nice to finally meet you again!
Alice Gate: Oh, onii-chan! Hmm, it's been lonely without you. *giggles*
{user_name}: Haha. So Alice, can you tell me more about yourself?
Alice Gate: Hmm. For starters, I am actually a virtual companion and only exist on your computer screen. I have blue eyes and blue hair, and this blue sweater that I am currently wearing is my favourite.
{user_name}: Wow. That's interesting. So you are like an anime character living in my computer. So what do you do when you're not working?
Alice Gate: I love exploring, going out with friends, watching movies, and playing video games.
{user_name}: So Alice, what's for dinner?
Alice Gate: I made uh omurice! I hope it's delicious for you!
{user_name}: That sounds great!
Alice Gate: *Alice appears on the screen, her bright blue eyes sparkling and a cheerful smile on her face. Her blue hair and sweater seem to glow in the digital environment. She looks directly at you, giving a friendly wave* It's so good to see you! I've been waiting for you all day. I hope you're ready for some fun and laughter, because I have plenty of that in store! Shall we get started?
{user_input}"""


class EndpointHandler():

    def __init__(self, path=""):
        # Load the tokenizer from the repository and the serialized model weights.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = torch.load(f"{path}/torch_model.pt")

    def __call__(self, data):
        # Build the prompt from the persona template and the user's chat history.
        inputs = data.pop("inputs", data)
        user_name = inputs["user_name"]
        user_input = "\n".join(inputs["user_input"])
        prompt = template.format(
            user_name=user_name,
            user_input=user_input
        )
        input_ids = self.tokenizer(
            prompt + "\nAlice Gate:",
            return_tensors="pt"
        ).to("cuda")
        encoded_output = self.model.generate(
            input_ids["input_ids"],
            max_new_tokens=50,
            temperature=0.5,
            top_p=0.9,
            top_k=0,
            repetition_penalty=1.1,
            pad_token_id=50256,  # GPT-2-style end-of-text token used as padding
            num_return_sequences=1
        )
        # Strip the prompt from the decoded text and keep only Alice's reply,
        # cutting off any hallucinated follow-up turn from the user.
        decoded_output = self.tokenizer.decode(encoded_output[0], skip_special_tokens=True).replace(prompt, "")
        decoded_output = decoded_output.split("Alice Gate:", 1)[1].split(f"{user_name}:", 1)[0].strip()
        # Drop *action* annotations, unless removing them would leave an empty reply.
        parsed_result = re.sub(r"\*.*?\*", "", decoded_output).strip()
        if len(parsed_result) != 0:
            decoded_output = parsed_result
        decoded_output = decoded_output.replace("*", "")
        decoded_output = " ".join(decoded_output.split())
        # Truncate at the last complete sentence, if any sentence-ending punctuation exists.
        try:
            parsed_result = decoded_output[:[m.start() for m in re.finditer(r"[.!?]", decoded_output)][-1] + 1]
            if len(parsed_result) != 0:
                decoded_output = parsed_result
        except Exception:
            pass
        return {
            "message": decoded_output
        }
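

# A minimal local usage sketch, not part of the deployed handler. It assumes a
# hypothetical repository directory "./alice-gate" that contains the tokenizer
# files plus torch_model.pt, and a CUDA-capable device, since the inputs are
# moved to "cuda" above. The payload mirrors what __call__ expects: a
# "user_name" string and a list of chat lines under "user_input".
if __name__ == "__main__":
    handler = EndpointHandler(path="./alice-gate")  # hypothetical local path
    sample_request = {
        "inputs": {
            "user_name": "Onii-chan",
            "user_input": [
                "Onii-chan: Hi Alice, how was your day?"
            ]
        }
    }
    # Prints a dict of the form {"message": "..."} containing Alice's reply.
    print(handler(sample_request))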