File size: 5,105 Bytes
ba5915f
 
8bb0e66
ba5915f
8bb0e66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ba5915f
 
 
 
 
 
 
 
 
 
 
 
 
8bb0e66
 
ba5915f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import gradio as gr
import game 
import ai
from app import format_game_result

def get_system_message(game_mode):
    """Return the LLM system prompt for the kids' geography quiz.

    Parameters
    ----------
    game_mode :
        The selected game mode. NOTE(review): currently unused — the same
        prompt is returned for every mode; confirm whether mode-specific
        prompts are intended.

    Returns
    -------
    str
        The full tutor system prompt (question format, answer-key rule,
        incorrect-answer reaction, and a worked example).
    """
    prompt = """You are a friendly classroom tutor for 9-10 year-olds.
Generate one multiple-choice geography question that a 4th-grader would answer.

Requirements:
1. Question text - one short sentence (or two) that asks the student to identify a place, landmark, or simple map feature.
2. Four answer options labeled A)-D). All options should be plausible country, state, or landmark names.
3. After the question and choices, include a short "Answer key" line that states the correct answer (e.g., "Correct answer: A").
4. If a student gives an incorrect answer, respond with:

   "That's okay! Let's look at why that answer isn't right and see what clues the question gives us. Remember, …"

   Then provide a brief explanation (1-2 sentences) that points the student toward the correct answer without giving it away outright.

5. Keep vocabulary simple, concrete, and age-appropriate.
6. Do not mention the word "multiple choice" in the output; just give the question, options, answer key, and the incorrect-answer reaction.

Example output (use as a guide only, do NOT copy):

**Question:** Which country is shaped like a boot and is located in southern Europe?

A) France
B) Italy
C) Greece
D) Spain

**Answer key:** Correct answer: B

If the student answers incorrectly:
"That's okay! Let's look at why that answer isn't right and see what clues the question gives us. Remember, the question mentions the country looks like a boot. Think about which country on a map of Europe really looks like you could wear it on your foot!"
"""
    return prompt

def _debug_log_request(messages, max_tokens):
    """Print debug statistics for an outgoing chat request.

    Logs an estimated input-token count (rough heuristic: ~4 chars per
    token), the message count, the max-token setting, and a per-message
    role/length preview. Side effects: stdout only.
    """
    total_input_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
    estimated_input_tokens = total_input_chars // 4  # rough: ~4 chars per token
    print(f"πŸ” DEBUG - Estimated input tokens: {estimated_input_tokens}")
    print(f"πŸ” DEBUG - Messages count: {len(messages)}")
    print(f"πŸ” DEBUG - Max tokens setting: {max_tokens}")

    for i, msg in enumerate(messages):
        role = msg.get("role", "unknown")
        content = str(msg.get("content", ""))
        print(f"πŸ” DEBUG - Message {i+1} ({role}): {len(content)} chars")
        if role == "system":
            # System prompt is long; show only its tail.
            print(f"πŸ” DEBUG - System message preview: ...{content[-100:]}")
        elif role == "user":
            print(f"πŸ” DEBUG - User message: {content}")
        elif role == "assistant":
            print(f"πŸ” DEBUG - Assistant message: {content[:50]}...")


def respond(
    message,
    history: list[dict[str, str]],
    max_tokens,
    game_mode_selection,
):
    """Stream one LLM reply for the geography-quiz chat and return it.

    An empty ``history`` marks a new session: the guess counter is reset
    and a fresh system prompt is installed on the shared ``game`` module.

    Parameters
    ----------
    message : str
        The student's latest chat message.
    history : list[dict[str, str]]
        Prior chat turns from Gradio; empty at session start.
    max_tokens : int
        Upper bound on generated tokens, forwarded to the inference client.
    game_mode_selection :
        UI-selected game mode, forwarded to ``get_system_message``.

    Returns
    -------
    str
        The cleaned model response; formatted via ``format_game_result``
        when a game-end phrase is detected or the guess limit is reached,
        or an ``"Error during inference: ..."`` string on failure.

    For more information on `huggingface_hub` Inference API support, see:
    https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
    """
    # New conversation: reset shared game state on the `game` module.
    if not history:
        game.guess_number = 0
        # BUGFIX: forward the caller's mode instead of hard-coding
        # game.MODE_STATES. get_system_message currently ignores its
        # argument, so today's behavior is unchanged, but the caller's
        # selection is no longer silently dropped.
        game.current_system = get_system_message(game_mode_selection)
        print(f"πŸ” DEBUG - New session started, selected state: {game.selected_country}")

    game.guess_number += 1
    # Appending the guess number to the system prompt mirrors the original
    # behavior (presumably a cache-buster / turn marker — TODO confirm).
    messages = [{"role": "system", "content": game.current_system + str(game.guess_number)}]
    messages.append({"role": "user", "content": message})

    _debug_log_request(messages, max_tokens)

    response = ""
    output_token_count = 0

    try:
        for message_chunk in ai.client.chat_completion(
            messages,
            stream=True,
            # BUGFIX: max_tokens was accepted and logged but never passed
            # to the client, so the UI setting had no effect.
            max_tokens=max_tokens,
            response_format={"type": "text"},
        ):
            choices = message_chunk.choices
            token = ""
            if len(choices) and choices[0].delta.content:
                token = choices[0].delta.content
                output_token_count += 1
            response += token

        # Output-side debug statistics (same 4-chars-per-token heuristic).
        estimated_output_tokens = len(response) // 4
        print(f"πŸ” DEBUG - Output token chunks received: {output_token_count}")
        print(f"πŸ” DEBUG - Estimated output tokens (by chars): {estimated_output_tokens}")
        print(f"πŸ” DEBUG - Response length: {len(response)} characters")
        print(f"πŸ” DEBUG - Raw response: {response}")

        # Strip model artifacts before any game-end detection.
        response = ai.clean_response(response)
        print(f"πŸ” DEBUG - Cleaned response: {response}")

        # Game-end detection is phrase-based on the cleaned response.
        if "The country was" in response or "The state was" in response:
            print(f"πŸ” DEBUG - Game end detected! Location extracted: {game.selected_country}")
            return format_game_result(response)
        # BUGFIX: >= instead of == so a counter that skips past 20
        # (e.g. stale module state) still terminates the game.
        if game.guess_number >= 20:
            print(f"πŸ” DEBUG - Maximum guesses reached: {game.guess_number}")
            return format_game_result(response)
        print("πŸ” DEBUG - Regular response (no game end)")
        return response
    except Exception as e:
        # Surface inference failures to the chat UI instead of crashing.
        return f"Error during inference: {str(e)}"