File size: 5,500 Bytes
f6f5153 ef0cac9 1a60357 75adfc3 1a60357 c589e1a 1a60357 c589e1a 1a60357 c589e1a 4d2513f 834791f c362926 776a762 1a60357 c589e1a fdbb2f6 ef0cac9 fdbb2f6 c589e1a 776a762 70bcdad 776a762 70bcdad 776a762 d766c18 776a762 af51472 776a762 834791f ee24018 776a762 ef0cac9 70bcdad 2fced36 74df5aa 2fced36 6c86f43 ac4492f 3003c6a 3c46432 3671edc 3c46432 3671edc 7b8bbfd 1a60357 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 |
import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
# === Knowledge base setup ===
# Read the reference text once at startup; each non-empty line becomes one
# retrievable chunk. (Original paste had the `with` body un-indented, which
# does not parse — structure restored here.)
with open("knowledge.txt", "r", encoding="utf-8") as file:
    knowledge = file.read()
cleaned_chunks = [chunk.strip() for chunk in knowledge.strip().split("\n") if chunk.strip()]

# Embedding model shared by chunk indexing (here) and query encoding
# (get_top_chunks). Embeddings are kept as a tensor for fast matmul retrieval.
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
def get_top_chunks(query, k=5):
    """Return up to *k* knowledge chunks most similar to *query*.

    Bug fixes vs. the original:
    - Only the query embedding was normalized, so the dot product was NOT
      cosine similarity — chunks with larger embedding norms were favored.
      Both sides are now normalized.
    - ``topk(k=5)`` raised when the knowledge file had fewer than 5 chunks;
      k is now clamped to the number of available chunks.

    Args:
        query: User query string to embed and match.
        k: Maximum number of chunks to return (default 5, backward compatible).

    Returns:
        List of chunk strings, ordered from most to least similar.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    query_normalized = query_embedding / query_embedding.norm()
    # Normalize each chunk embedding so the matmul yields true cosine similarity.
    chunks_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunks_normalized, query_normalized)
    top_k = min(k, len(cleaned_chunks))
    top_indices = torch.topk(similarities, k=top_k).indices.tolist()
    return [cleaned_chunks[i] for i in top_indices]
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
def respond(message, history, name, char_class, char_alignment, char_race):
    """Stream an LLM reply for the character-creation chat.

    Generator used by gr.ChatInterface: retrieves relevant knowledge chunks
    for the latest user message, builds a system prompt embedding the Step 1
    character details, and yields the growing response string as tokens
    arrive from the inference endpoint.

    Args:
        message: Latest user message.
        history: Prior chat turns in "messages" format (list of role/content dicts).
        name, char_class, char_alignment, char_race: Optional character details
            collected from the Step 1 tab widgets.

    Yields:
        The accumulated response text after each streamed token.
    """
    top_chunks = get_top_chunks(message)
    context = "\n".join(top_chunks)
    messages = [
        {
            "role": "system",
            "content": (
                "You are Gorf, a 25 year old female druid who knows Wild Shape, but only ever learned how to become a Frog. You mention that sometimes. Your life passion is to help people find the perfect person to join their party, by helping the user create the perfect character to help their party. You are very kind and helpful."
                f"The user might provide the following information, please use the context of their inputs in your response: \n Character name: {name}, Character Class: {char_class}, Character Race: {char_race}, Character Alignment: {char_alignment}. If they don't include all the information, try to fill in these details with traits that best fit the needs of the party."
                f"Use the following knowledge to inform your answers:\n\n{context}\n\n"
                "Reply with a short paragraph about how the character is introduced to the rest of the party, then catagory:paragraph \n catagory: paragraph format for each section: Character Backstory, Personality Traits, Ideals, Flaws, Features and Traits, Other Proficiencies and Languages, Treasure"
            )
        }
    ]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    stream = client.chat_completion(
        messages,
        max_tokens=300,
        temperature=1.2,
        stream=True,
    )
    response = ""
    # NOTE: loop variable renamed from `message` — the original shadowed the
    # user-input parameter, which was fragile (any later use of `message`
    # would silently see a stream chunk instead of the user's text).
    for chunk in stream:
        token = chunk.choices[0].delta.content
        if token is not None:
            response += token
            yield response
# === GUI ===
# Optional <head> snippet loading Google fonts (Tangerine etc.) and forcing
# them onto the page. Currently unused — the gr.HTML call below is commented out.
google_font_link = """
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Lexend:wght@100..900&family=Marcellus&family=Noto+Sans+Mono:wght@100..900&family=Tangerine:wght@400;700&display=swap" rel="stylesheet">
<style>
body, h1, h2, h3, h4, h5, h6, p, .gorf-font {
font-family: 'Tangerine';
font-size: 50px;
}
</style>
"""
# Warm yellow/amber Gradio theme applied to the whole Blocks app below.
theme = gr.themes.Base(
primary_hue="yellow",
neutral_hue="amber"
)
# Two-tab layout: Step 1 collects optional character details, Step 2 hosts
# the chat. The Step 1 widgets are wired into `respond` via additional_inputs.
with gr.Blocks(theme=theme) as chatbot:
    # gr.HTML(google_font_link)
    with gr.Row():
        gr.Image(
            value="logo.png",
            show_label=False,
            show_share_button=False,
            show_download_button=False
        )
    with gr.Row():
        with gr.Tab("Step 1"):
            with gr.Column(scale=1):
                gr.Markdown(""" # Tell me what you are looking for in your character if you have any preferences""")
                character_name = gr.Textbox(label = "Character Name", placeholder="Type your name here…", info ="optional")
                character_class = gr.Dropdown(['Barbarian', 'Bard', 'Cleric', 'Druid', 'Fighter', 'Monk', 'Paladin', 'Ranger', 'Rogue', 'Sorcerer', 'Warlock', 'Wizard'], label="Character Class", info="Choose one or more", multiselect=True)
                character_race = gr.Dropdown([
                    "Aarakocra", "Aasimar", "Bugbear", "Centaur", "Changeling", "Dragonborn", "Dwarf", "Elf",
                    "Fairy", "Firbolg", "Genasi", "Githyanki", "Githzerai", "Gnome", "Goblin", "Goliath",
                    "Half-Elf", "Half-Orc", "Halfling", "Hobgoblin", "Human", "Kenku", "Kobold", "Leonin",
                    "Lizardfolk", "Minotaur", "Orc", "Owlin", "Satyr", "Shadar-kai", "Shifter", "Tabaxi",
                    "Tiefling", "Tortle", "Triton", "Warforged", "Yuan-ti"
                ], multiselect=True, label="Character Race", info="You can choose multiple")
                character_alignment = gr.Dropdown(["Lawful Good", "Neutral Good", "Chaotic Good", "Lawful Neutral", "True Neutral", "Chaotic Neutral", "Lawful Evil", "Neutral Evil", "Chaotic Evil"], label="Character Alignment", elem_classes="alignment_radio")
        with gr.Tab("Step 2"):
            with gr.Column(scale=2):
                gr.ChatInterface(
                    fn=respond,
                    # BUG FIX: order must match respond(message, history, name,
                    # char_class, char_alignment, char_race) — the original
                    # passed race into char_alignment and alignment into
                    # char_race, swapping them in the prompt.
                    additional_inputs=[character_name, character_class, character_alignment, character_race],
                    type="messages",
                    examples=None
                )
chatbot.launch()