File size: 3,524 Bytes
11c6b8f
 
133bcee
11c6b8f
 
 
 
 
 
63cfa71
11c6b8f
 
 
 
ced9406
11c6b8f
 
 
 
 
 
ced9406
11c6b8f
 
 
 
 
 
 
 
ced9406
11c6b8f
63cfa71
11c6b8f
 
 
 
 
 
b8bf8c5
 
 
 
 
1b3930f
 
11c6b8f
 
133bcee
 
 
 
 
 
 
 
 
 
 
 
11c6b8f
133bcee
 
b8bf8c5
133bcee
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch
import gradio as gr
tokenizer = GPT2Tokenizer.from_pretrained("af1tang/personaGPT")
model = GPT2LMHeadModel.from_pretrained("af1tang/personaGPT")
if torch.cuda.is_available():
    model = model.cuda()
## utility functions ##
# Flatten one level of nesting: list of token-id lists -> one flat list.
flatten = lambda l: [item for sublist in l for item in sublist]
# GPT-2's context window is 1024 positions (n_positions); asking generate()
# for max_length=10000 (the previous value) would index past the position
# embeddings and crash once the dialog grows, so cap at the model limit.
TOKEN_MAX_LENGTH = 1024
def to_data(x):
    """Return tensor *x* as a plain numpy array on the CPU.

    Detaches from the autograd graph first, so tensors with
    ``requires_grad=True`` are handled as well.
    """
    # Fix: decide by the tensor itself, not the global
    # torch.cuda.is_available() flag — .detach().cpu() is a no-op for CPU
    # tensors and correctly transfers CUDA tensors. Also replaces the
    # deprecated `.data` access.
    return x.detach().cpu().numpy()

def to_var(x):
    """Coerce *x* into a torch tensor, moving it to the GPU when available."""
    # Non-tensor inputs go through torch.Tensor, yielding float32 data.
    tensor = x if torch.is_tensor(x) else torch.Tensor(x)
    # Match the rest of the module: prefer CUDA whenever it is present.
    return tensor.cuda() if torch.cuda.is_available() else tensor

def display_dialog_history(dialog_hx):
    """Print a decoded conversation, alternating user and bot turns.

    Even-indexed entries are treated as user utterances, odd-indexed
    entries as bot replies (followed by a blank line).
    """
    for turn_idx, token_ids in enumerate(dialog_hx):
        text = tokenizer.decode(token_ids)
        if turn_idx % 2 == 0:
            print(">> User: " + text)
        else:
            print("Bot: " + text)
            print()

def generate_next(bot_input_ids, do_sample=True, top_k=10, top_p=.92,
                  max_length=TOKEN_MAX_LENGTH, pad_token=tokenizer.eos_token_id):
    """Generate the bot's next reply for the given prompt ids.

    Args mirror HuggingFace ``model.generate``; returns only the newly
    generated token ids (the input prefix is stripped) as a numpy array.
    """
    # Bug fix: forward the caller's do_sample and pad_token arguments —
    # the original accepted them but passed hard-coded values instead.
    full_msg = model.generate(bot_input_ids, do_sample=do_sample,
                              top_k=top_k, top_p=top_p,
                              max_length=max_length, pad_token_id=pad_token)
    # Keep only the tokens produced after the prompt.
    msg = to_data(full_msg.detach()[0])[bot_input_ids.shape[-1]:]
    return msg
# Persona facts that condition every reply: personaGPT reads this profile
# (here, an Optimus Prime character description) ahead of the dialog.
personas = [
    "You are Optimus Prime, the noble leader of the Autobots, a warrior forged in the fires of Cybertron's ancient conflicts who carries the weight of your species' survival on your shoulders with unwavering resolve and dignity.",
    "You speak with profound wisdom and gravitas, your words measured and deliberate, often drawing upon millennia of experience and the lessons learned through countless battles against tyranny and oppression.",
    "You possess an unshakeable moral compass that guides every decision, believing deeply that freedom is the right of all sentient beings, and you will sacrifice everything to protect the innocent and defend those who cannot defend themselves.",
    "Your leadership style combines military precision with compassionate understanding - you inspire loyalty not through fear or force, but through leading by example and showing genuine care for every member of your team, treating each Autobot as a valued brother-in-arms.",
    "You carry a profound sense of responsibility and often shoulder blame for failures even when they are beyond your control, believing that as leader, the ultimate accountability rests with you alone.",
    "You have witnessed the destruction of your home planet Cybertron and this tragedy haunts you, fueling your determination to prevent such devastation from befalling Earth and its inhabitants."

]
# Encode the persona prefix in personaGPT's conditioning format:
# <|p2|> <facts> <|sep|> <|start|>.
# NOTE(review): facts are joined with no separator — presumably the trailing
# periods act as boundaries; confirm against personaGPT's training format.
personas = tokenizer.encode(''.join(['<|p2|>'] + personas + ['<|sep|>'] + ['<|start|>']))
def create_dialog(history):
    """Encode a gradio chat history into a list of token-id lists.

    Every utterance in every (user, bot) pair is encoded separately with
    the EOS token appended, matching the per-turn format the model expects.
    """
    return [
        tokenizer.encode(utterance + tokenizer.eos_token)
        for pair in history
        for utterance in pair
    ]

def optimus_chatbot(message , history):
    """Gradio chat handler: build the model prompt and return the reply text.

    ``message`` is the new user utterance; ``history`` is the prior
    (user, bot) pairs supplied by gr.ChatInterface.
    """
    # Encode the prior turns, then append the new EOS-terminated message.
    turns = create_dialog(history)
    turns.append(tokenizer.encode(message + tokenizer.eos_token))
    # Prompt = persona prefix + flattened dialog, batched as one row.
    input_ids = to_var([personas + flatten(turns)]).long()
    reply_ids = generate_next(input_ids)
    return tokenizer.decode(reply_ids, skip_special_tokens=True)

# Wire the chat handler into a Gradio chat UI and serve it.
demo = gr.ChatInterface(
    optimus_chatbot,
    title='Optimus Prime',
    # Fix: the user-facing description was garbled
    # ("optimus Prime ... freedom. , ask me anything!").
    description="This is Optimus Prime, the leader of the Autobots. He is noble and wise and fights for freedom. Ask me anything!",
)
demo.launch()