import gradio as gr
from gradio.inputs import Textbox, Slider
import requests
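
# Note: gradio.inputs (Textbox, Slider with default=) is the Gradio 2.x API this
# demo targets. On Gradio 3+, the equivalents are gr.Textbox and gr.Slider
# (with value= instead of default=), and Interface no longer accepts
# allow_screenshot.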

title = "A conversation with an NPC in a Tavern 🍻"
description = ""
article = """
<p> If you liked it, don't forget to 💖 the project 🥰 </p>
<h2> Parameters: </h2>
<ul>
<li><i>message</i>: what you want to say to the NPC.</li>
<li><i>npc_name</i>: the name of the NPC.</li>
<li><i>initial_prompt</i>: the NPC's starting prompt; modify it to see whether the results improve.</li>
<li><i>top_p</i>: controls how deterministic the model is when generating a response.</li>
<li><i>temperature</i>: sampling temperature; higher values mean the model takes more risks.</li>
<li><i>max_new_tokens</i>: maximum number of tokens to generate.</li>
</ul>
<img src='http://www.simoninithomas.com/test/gandalf.jpg' alt="Gandalf"/>"""

theme = "huggingface"


def build_prompt(conversation, context, interlocutor_names):
    """Concatenate the context and every (player, NPC) exchange into one prompt."""
    prompt = context + "\n"
    for player_msg, npc_msg in conversation:
        prompt += "\n- " + interlocutor_names[0] + ":" + player_msg
        prompt += "\n- " + interlocutor_names[1] + ":" + npc_msg
    return prompt
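
# For illustration, with context "The following is a conversation with Antoine."
# and history [("Hello!", "")], build_prompt returns:
#
#   The following is a conversation with Antoine.
#
#   - Player:Hello!
#   - Antoine:
#
# chat() always appends (message, "") before building, so the prompt ends with
# an empty NPC line for the model to complete.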


def clean_chat_output(txt, prompt, interlocutor_names):
    """Strip the prompt from the model output and keep only the NPC's reply."""
    delimiter = "\n- " + interlocutor_names[0]
    output = txt.replace(prompt, '')
    # Cut where the model starts inventing the player's next line. Guarding
    # against find() returning -1 avoids silently dropping the last character.
    cut = output.find(delimiter)
    if cut != -1:
        output = output[:cut]
    return output
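
# For illustration: if the model continues with
# "Well met, traveler.\n- Player: How are you?", clean_chat_output returns
# "Well met, traveler." after cutting at the hallucinated player line.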


API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"


def query(payload):
    """POST a generation request to the Hugging Face Inference API."""
    response = requests.post(API_URL, json=payload)
    return response.json()
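
# Note: anonymous calls to the hosted Inference API are rate-limited. With a
# Hugging Face API token you could authenticate the request, e.g. (HF_API_TOKEN
# is a hypothetical environment variable, not part of the original demo):
#
#   import os
#   headers = {"Authorization": "Bearer " + os.environ["HF_API_TOKEN"]}
#   response = requests.post(API_URL, headers=headers, json=payload)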


def chat(message, npc_name, initial_prompt, top_p, temperature, max_new_tokens, history=[]):
    # history arrives via Gradio's "state" input; [] seeds the first call.
    interlocutor_names = ["Player", npc_name]

    # Debug logging of the incoming request
    print("message", message)
    print("npc_name", npc_name)
    print("initial_prompt", initial_prompt)
    print("top_p", top_p)
    print("temperature", temperature)
    print("max_new_tokens", max_new_tokens)
    print("history", history)

    # Append the new player message with an empty NPC reply so the prompt ends
    # with a line for the model to complete.
    history.append((message, ""))
    conversation = history

    prompt = build_prompt(conversation, initial_prompt, interlocutor_names)

    json_req = {
        "inputs": prompt,
        "parameters": {
            "top_p": top_p,
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
            "return_full_text": False,
        },
    }

    # A successful text-generation call returns a list like
    # [{"generated_text": "..."}]
    output = query(json_req)
    output = output[0]["generated_text"]
    print("output", output)

    response = clean_chat_output(output, prompt, interlocutor_names)
    print("response", response)
    history[-1] = (message, response)
    return history, history
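
# Quick local sanity check, bypassing the UI (assumes the API is reachable):
#
#   history, _ = chat("Hello!", "Antoine",
#                     "The following is a conversation with Antoine.",
#                     0.9, 1.1, 50, [])
#   print(history)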


iface = gr.Interface(
    fn=chat,
    inputs=[
        Textbox(label="message", placeholder="Hello!"),
        Textbox(label="npc_name", placeholder="Antoine"),
        Textbox(label="initial_prompt",
                placeholder="The following is a conversation with Antoine, a guard for Northfall who's drinking in the Tavern."),
        Slider(minimum=0.5, maximum=1, step=0.05, default=0.9, label="top_p"),
        Slider(minimum=0.5, maximum=1.5, step=0.1, default=1.1, label="temperature"),
        Slider(minimum=20, maximum=250, step=10, default=50, label="max_new_tokens"),
        "state",
    ],
    outputs=["chatbot", "state"],
    allow_screenshot=True,
    allow_flagging=True,
    title=title,
    description=description,
    article=article,
    theme=theme,
)

if __name__ == "__main__":
    iface.launch()
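
# When running locally (outside a Hugging Face Space), launching with
# share=True would additionally expose a temporary public URL:
#
#   iface.launch(share=True)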