Update app.py
app.py (CHANGED)

Before: the stock Gradio chat template this Space started from. The diff viewer preserved only fragments of the removed lines, so lost spans are marked with `...`:

```python
import gradio as gr
from huggingface_hub import InferenceClient


def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
    """
    ...
    """
    client = InferenceClient(token=hf_token.token, model=...)

    ...
    messages.extend(history)
    ...
    messages.append({"role": "user", "content": message})

    response = ""

    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = message.choices
        ...
        yield response


...
)

...
    with gr.Sidebar():
        gr.LoginButton()
        chatbot.render()


if __name__ == "__main__":
    demo.launch()
```
After: the same file rebuilt as "The Yes/No Detective", a lateral-thinking guessing game in which the model plays a Game Master that may only answer Yes, No, or Irrelevant.

```python
import gradio as gr
from huggingface_hub import InferenceClient

# --- CONFIGURATION ---
# We use the Instruct version of the 1B model, as it follows rules best.
MODEL_ID = "meta-llama/Llama-3.2-1B-Instruct"

# --- THE RIDDLES (DATABASE) ---
# Each riddle has a public 'scenario' and a hidden 'solution'.
RIDDLES = {
    "The Dead Man": {
        "scenario": "A man lies dead in a field. Next to him is an unopened package. There are no footprints around him. How did he die?",
        "solution": "The man jumped from a plane. The package was his parachute, which failed to open.",
    },
    "The Barman": {
        "scenario": "A man walks into a bar and asks for a glass of water. The barman pulls out a gun and points it at him. The man says 'Thank you' and walks out. Why?",
        "solution": "The man had the hiccups. The barman used the gun to scare him, curing the hiccups. The man was thankful.",
    },
    "The Cabin": {
        "scenario": "Two men are in a cabin in the woods. One is dead. The cabin did not burn down, but the dead man is charred. How did this happen?",
        "solution": "The 'cabin' is the cabin of a crashed airplane. The man died in the crash and the fire that followed.",
    },
    "The Suitcase": {
        "scenario": "A woman opens her suitcase and finds a dead man inside. She is not arrested or afraid. Why?",
        "solution": "The 'suitcase' is actually a coffin. The woman is attending a funeral.",
    },
}

def respond(
    message,
    history: list[dict[str, str]],
    system_message,  # Ignored: we build our own system prompt below
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
    selected_riddle,  # The dropdown input
):
    """
    Handles the game logic: injects the hidden solution into the
    system prompt based on the user's selection.
    """
    client = InferenceClient(token=hf_token.token, model=MODEL_ID)
    # 1. Get the current riddle data
    current_game = RIDDLES[selected_riddle]

    # 2. Construct the strict Game Master prompt.
    # This is where the "Context Learning" happens: we teach the model the rules in-context.
    game_master_prompt = (
        f"You are the Game Master of a lateral thinking puzzle. "
        f"CURRENT PUZZLE SCENARIO: '{current_game['scenario']}' "
        f"HIDDEN SOLUTION (the user does not know this!): '{current_game['solution']}'"
        f"\n\n"
        f"RULES FOR YOU:\n"
        f"1. The user will ask questions to figure out the solution.\n"
        f"2. You must analyze their question against the HIDDEN SOLUTION.\n"
        f"3. Answer ONLY with 'Yes', 'No', or 'Irrelevant'.\n"
        f"4. If the user guesses the solution correctly, say 'CORRECT! You solved it: [explain the solution]'.\n"
        f"5. Do NOT give hints. Do NOT explain your 'Yes'/'No' answers."
    )

    # 3. Build the message history
    messages = [{"role": "system", "content": game_master_prompt}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""

    # 4. Stream the response
    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,  # Low temperature = stricter adherence to Yes/No
        top_p=top_p,
    ):
        choices = message.choices
        # Token accumulation (unchanged template lines, collapsed by the diff view)
        token = ""
        if len(choices) and choices[0].delta.content is not None:
            token = choices[0].delta.content

        response += token
        yield response
```
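To sanity-check the Game Master outside the UI, here is a minimal sketch reusing `MODEL_ID` and `RIDDLES` from the file above. It assumes a personal access token in the `HF_TOKEN` environment variable and abbreviates the rules to the one constraint that matters for the test:

```python
# Minimal sketch, not part of app.py: query the Game Master directly.
# ASSUMPTION: a personal token is available in the HF_TOKEN environment variable.
import os

from huggingface_hub import InferenceClient

test_client = InferenceClient(token=os.environ["HF_TOKEN"], model=MODEL_ID)

case = RIDDLES["The Dead Man"]
system = (
    f"You are the Game Master of a lateral thinking puzzle. "
    f"CURRENT PUZZLE SCENARIO: '{case['scenario']}' "
    f"HIDDEN SOLUTION (the user does not know this!): '{case['solution']}'\n\n"
    "Answer ONLY with 'Yes', 'No', or 'Irrelevant'."  # abbreviated rules for the test
)

# Non-streaming call: one question, one short answer.
result = test_client.chat_completion(
    messages=[
        {"role": "system", "content": system},
        {"role": "user", "content": "Did he fall from a great height?"},
    ],
    max_tokens=16,
    temperature=0.2,
)
print(result.choices[0].message.content)  # ideally something like "Yes"
```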
The UI wires a case selector and a read-only scenario display above the chat, and hides the template inputs the game controls itself:

```python
# --- UI SETUP ---
# We use Blocks to add the dropdown menu cleanly above the chat.
with gr.Blocks(theme=gr.themes.Soft()) as demo:

    # Header
    gr.Markdown("# 🕵️ The Yes/No Detective")
    gr.Markdown(
        "**Instructions:** I am thinking of a strange situation. "
        "Ask me questions to figure out the truth! I can only answer **Yes**, **No**, or **Irrelevant**."
    )

    with gr.Row():
        # The dropdown to select the "level"
        riddle_select = gr.Dropdown(
            choices=list(RIDDLES.keys()),
            value="The Dead Man",
            label="Select a Mystery Case",
            interactive=True,
        )

        # Display the current scenario description dynamically
        scenario_display = gr.Textbox(
            label="The Scenario (Clue)",
            value=RIDDLES["The Dead Man"]["scenario"],
            interactive=False,
        )

    # Update the scenario text box when the dropdown changes
    def update_scenario(choice):
        return RIDDLES[choice]["scenario"]

    riddle_select.change(fn=update_scenario, inputs=riddle_select, outputs=scenario_display)

    # The chat interface.
    # Note: the system message input is hidden because respond() hardcodes its own.
    chatbot = gr.ChatInterface(
        respond,
        type="messages",
        additional_inputs=[
            gr.Textbox(value="Game Master", visible=False),  # hidden system prompt placeholder
            gr.Slider(minimum=1, maximum=512, value=100, step=1, label="Max tokens", visible=False),
            gr.Slider(minimum=0.1, maximum=2.0, value=0.2, step=0.1, label="Temperature"),  # low temp for precision
            gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
            riddle_select,  # pass the selected riddle to respond()
        ],
    )

    # Sidebar for login
    with gr.Sidebar():
        gr.LoginButton()


if __name__ == "__main__":
    demo.launch()
```
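One note on the login wiring: `respond()` takes a parameter annotated as `gr.OAuthToken`, which Gradio fills automatically from the visitor's `gr.LoginButton()` session rather than from `additional_inputs`, so each player calls the inference API with their own credentials. That is why the token does not appear in the `additional_inputs` list even though it sits between `top_p` and `selected_riddle` in the function signature.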