# Source: text-adventure-template / adaptive_knowledge.py
# Uploaded by OctaveLeroy ("Upload 9 files", commit 1f5351c, verified)
"""
adaptive_knowledge.py
Système complet de gestion des connaissances adaptatives pour l'Agent ReAct.
Gère l'extraction, l'analyse, la qualité et la génération de la base de connaissances.
"""
import os
import re
import json
from typing import Dict, List, Optional, Tuple
# =============================================================================
# 1. UTILITAIRES DE SECTIONS
# =============================================================================
class SectionUtils:
    """Helpers for reading and rewriting '## SECTION' blocks of a markdown knowledge base."""

    @staticmethod
    def extract_section_content(content: str, section_name: str) -> str:
        """Return the body of the '## <section_name>' section, or '' when absent.

        Matching is case-insensitive and substring-based on the header line,
        which tolerates headers that were wrapped mid-word by the LLM
        (e.g. "## LOCATION: O\\nutside").
        """
        if not content:
            return ""
        wanted = section_name.strip().upper()
        collected = []
        inside = False
        for raw_line in content.split('\n'):
            normalized = raw_line.strip().upper()
            if normalized.startswith("##") and wanted in normalized:
                # Header detected: start (or re-start) collecting from the next line.
                inside = True
                continue
            if inside and normalized.startswith("##"):
                # A different section begins: we are done.
                break
            if inside:
                collected.append(raw_line)
        return "\n".join(collected).strip() if inside else ""

    @staticmethod
    def update_section_content(content: str, section_name: str, new_content: str) -> str:
        """Replace the named section in-place, or append it when missing.

        An empty knowledge base is seeded with the standard title first.
        """
        if not content:
            content = "# Zork Strategic Knowledge Base\n\n"
        header = f"## {section_name}"
        replacement = f"{header}\n\n{new_content}\n"
        # The section runs until the next '## ' header or the end of the document.
        found = re.search(rf"## {re.escape(section_name)}(.*?)(?=\n## |$)", content, re.DOTALL)
        if found is None:
            return f"{content}\n\n{replacement}\n"
        return content.replace(found.group(0), replacement, 1)

    @staticmethod
    def extract_cross_episode_section(content: str) -> str:
        """Extract the 'Wisdom' section that must persist across episodes."""
        return SectionUtils.extract_section_content(content, "CROSS-EPISODE INSIGHTS")
# =============================================================================
# 2. ANALYSEUR DE TOURS (Turn Extraction & Quality)
# =============================================================================
class TurnAnalyzer:
    """Turn-window analysis: structures the agent's raw history and decides
    whether a window is worth sending to the LLM for knowledge extraction."""

    @staticmethod
    def format_turn_window(history: list, start_index: int, end_index: int) -> Dict:
        """Convert the raw history slice [start_index:end_index) into a structured dict.

        Each history step is expected to be a dict with keys like 'thought',
        'tool', 'args', 'result', 'score' (and optionally 'location').

        Returns a dict with 'actions', 'death_events' and 'score_changes' lists,
        plus the 1-indexed 'start_turn'/'end_turn' bounds.
        """
        window = history[start_index:end_index]
        turn_data = {
            "start_turn": start_index + 1,
            "end_turn": end_index,
            "actions": [],
            "death_events": [],
            "score_changes": []
        }
        # Seed the score baseline from the step just before the window.
        prev_score = history[start_index - 1].get("score", 0) if start_index > 0 else 0
        for i, step in enumerate(window):
            turn_num = start_index + 1 + i
            action_repr = step.get("tool", "") + str(step.get("args", ""))
            turn_data["actions"].append({
                "turn": turn_num,
                "thought": step.get("thought", ""),
                "action": action_repr,
                "result": step.get("result", "")
            })
            # Score tracking: record every transition.
            current_score = step.get("score", 0)
            if current_score != prev_score:
                turn_data["score_changes"].append({
                    # BUGFIX: was the undefined name `current_location`, which
                    # raised NameError on every score change.
                    "location": step.get("location", "Unknown"),
                    "action": action_repr,
                    "explanation": step.get("result", ""),
                    "turn": turn_num,
                    "from": prev_score,
                    "to": current_score
                })
            prev_score = current_score
            # Death detection via keywords in the game's response text.
            result_lower = step.get("result", "").lower()
            if "game over" in result_lower or "died" in result_lower:
                turn_data["death_events"].append({
                    "turn": turn_num,
                    "reason": step.get("result", "")[:100],  # short preview
                    "context": step.get("thought", "")
                })
        return turn_data

    @staticmethod
    def check_quality(turn_data: Dict) -> Tuple[bool, str]:
        """Decide whether this turn window is worth analyzing.

        Returns (should_analyze, reason). Deaths and score changes are always
        worth it; otherwise the window needs enough actions and enough variety
        (a looping agent produces no new knowledge).
        """
        actions = turn_data["actions"]
        # Always analyze deaths and score progression.
        if turn_data["death_events"]:
            return True, "Death event detected"
        if turn_data["score_changes"]:
            return True, "Score progression detected"
        # Require a minimum number of actions.
        if len(actions) < 2:
            return False, "Too few actions"
        # Require variety (avoid analyzing repetitive loops).
        action_strs = [a["action"] for a in actions]
        variety_ratio = len(set(action_strs)) / len(actions)
        if variety_ratio < 0.2:
            return False, f"Low variety ({variety_ratio:.1%}) - repetitive behavior"
        return True, "Sufficient quality data"
# =============================================================================
# 3. GÉNÉRATEUR DE CONNAISSANCES (LLM Prompts)
# =============================================================================
class KnowledgeGenerator:
    """Builds the prompts used for the strategic-analysis LLM calls."""

    # Static world rules that are always injected into the agent's briefing.
    GLOBAL_RULES = """
- Player is a pragmatic adventurer.
- Movement: use cardinal directions (n, s, e, w, u, d).
- Interaction: You must 'examine' objects to find details.
- Combat: player prefers to 'hit' or 'smash' threats.
"""

    # Full-rewrite system prompt (legacy path).
    SYSTEM_PROMPT = """
You are the Strategic Analyst for a text game Agent.
Your goal is to update the 'Strategic Knowledge Base' based on recent gameplay logs.
ARCHITECTURAL RULES:
1. **Universal Scope**: Focus on mechanics that apply ANYWHERE (e.g., "Darkness kills without light").
2. **Avoid Specifics**: Do NOT write "Go North from House". Write "Explore cardinal directions systematically".
3. **Analyze Failure**: If the agent died, identify the CAUSE and the PRINCIPLE to avoid it.
OUTPUT SECTIONS REQUIRED:
- **UNIVERSAL GAME MECHANICS**: Rules of physics/parser (e.g., "Containers must be opened").
- **DANGER CATEGORIES**: Types of threats (e.g., "Trolls require weapons").
- **STRATEGIC PRINCIPLES**: Heuristics for decision making.
- **DEATH & DANGER ANALYSIS**: Specific analysis of recent deaths.
Preserve existing 'CROSS-EPISODE INSIGHTS' if provided.
"""

    # Delta-only system prompt: asks the LLM for NEW insights only.
    KNOWLEDGE_DELTA_PROMPT = """You are the Knowledge Analyst for a text game Agent.
Your goal is to extract strictly **ABSOLUTE RULES** and **STRATEGIC LESSONS**.
Do NOT:
- Do not list exits or room descriptions (this is handled by Memory).
- Do not list items in the room.
DO IDENTIFY:
- Universal Mechanics: "Light is required in dark places", "Glass breaks when hit".
- Danger Patterns: "Falling from heights is fatal", "Water traps require swimming".
- High-Level Strategies: "Examine objects twice", "Save heavy items for later".
INPUTS:
1. EXISTING KNOWLEDGE: What we already know (do NOT repeat this).
2. NEW LOGS: Recent gameplay history and ON CURRENT LOCATION
INSTRUCTIONS:
1. Compare the Logs against Existing Knowledge.
2. Identify **ONLY** new information:
- New mechanics discovered.
- New map connections or object interactions.
4. **SILENCE**: If nothing new happened, output "NO_UPDATES".
5. ONLY output insights that are NOT already in the EXISTING KNOWLEDGE.
6. Be concise: One bullet point per new discovery.
7. DO NOT repeat the headers if you have no data for them.
CRITICAL CONSTRAINTS:
1. NO SURVIVAL TALK: GAME is not about food, water, or fatigue. Ignore these.
2. NO REPETITION: If an idea is already in 'EXISTING KNOWLEDGE', skip it.
3. BE TECHNICAL: Focus on "Item X opens Door Y" or "Light is needed in Dark rooms".
4. NO SPECULATION: Do not invent dangers (like hostile pigs) unless the logs explicitly show the agent taking damage or dying.
OUTPUT FORMAT (Markdown):
Only output the sections that have NEW content. Do not output empty sections.
## UNIVERSAL MECHANICS (New technical rules only)
- [New rule found]
## STRATEGIC LESSONS and STRATEGIC TIPS (New navigation/interaction heuristics)
- [Strategy]
- [New strategy found]
## DANGER ANALYSIS
- [Analysis of what killed player or blocked him]
## SCORE EVENTS
- [Action] in [Location] increased score because [Reason].
"""

    @staticmethod
    def build_prompt(turn_data: Dict, existing_knowledge: str) -> str:
        """Assemble the user prompt: turn logs + event summary + current KB."""
        # Render each action of the window; results are truncated to 400 chars.
        log_parts = []
        for entry in turn_data["actions"]:
            log_parts.append(f"Turn {entry['turn']}:\n")
            log_parts.append(f" Thought: {entry['thought']}\n")
            log_parts.append(f" Action: {entry['action']}\n")
            log_parts.append(f" Result: {entry['result'][:400]}...\n\n")
        logs_str = "".join(log_parts)
        # Headline summary of notable events in the window.
        event_parts = []
        deaths = turn_data["death_events"]
        if deaths:
            event_parts.append(f"*** DEATHS: {len(deaths)} ***\n")
        score_changes = turn_data["score_changes"]
        if score_changes:
            event_parts.append(f"*** SCORE CHANGES: {len(score_changes)} ***\n")
        events_str = "".join(event_parts)
        return f"""
ANALYZE THIS GAMEPLAY WINDOW (Turns {turn_data['start_turn']}-{turn_data['end_turn']}):
EVENTS:
{events_str}
LOGS:
{logs_str}
EXISTING KNOWLEDGE BASE:
------------------------
{existing_knowledge}
------------------------
INSTRUCTIONS:
Update the Knowledge Base.
1. Incorporate lessons from the logs above.
3. Remove duplicates.
4. Output ONLY the NEW additions in Markdown format.
"""
# =============================================================================
# 4. LE MANAGER PRINCIPAL (L'Orchestrateur)
# =============================================================================
class AdaptiveKnowledgeManager:
    """Orchestrator: loads the knowledge base from disk, gates turn windows for
    quality, calls the LLM for a knowledge delta, merges and persists it."""

    def __init__(self, output_file: str = "knowledgebase.md"):
        # Path of the markdown knowledge base on disk.
        self.output_file = output_file

    def get_strategic_knowledge(self) -> str:
        """Build the condensed strategy briefing injected into the agent's prompt.

        BUGFIX: the signature was `def get_strategic_knowledge(self:str)` —
        a meaningless annotation on `self`, now removed.
        Returns '' when no knowledge base exists yet.
        """
        full_kb = self.load_knowledge()
        if not full_kb:
            return ""
        universal = SectionUtils.extract_section_content(full_kb, "UNIVERSAL MECHANICS")
        lessons = SectionUtils.extract_section_content(full_kb, "STRATEGIC LESSONS")
        dangers = SectionUtils.extract_section_content(full_kb, "DANGER ANALYSIS")
        score_events = SectionUtils.extract_section_content(full_kb, "SCORE EVENTS")
        briefing = f"""
### 📜 WORLD RULES
{KnowledgeGenerator.GLOBAL_RULES}
### 🧠 EVOLVED STRATEGIES
{universal}
{lessons}
### ⚠️ LETHAL LESSONS
{dangers if dangers else "No fatal errors recorded yet."}
"""
        # BUGFIX: score_events was extracted but never used; surface it so the
        # agent can see which actions have earned points.
        if score_events:
            briefing += f"### 🏆 SCORE EVENTS\n{score_events}\n"
        return briefing

    def load_knowledge(self) -> str:
        """Read the knowledge base file, returning '' when it does not exist."""
        if os.path.exists(self.output_file):
            with open(self.output_file, "r", encoding="utf-8") as f:
                return f.read()
        return ""

    def update_knowledge(self, history: list, start_idx: int, end_idx: int, llm_function) -> bool:
        """Orchestrate a knowledge-base update for one window of turns.

        Args:
            history: The agent's full self.history list.
            start_idx: Start of the window (0-indexed, inclusive).
            end_idx: End of the window (exclusive).
            llm_function: The agent's call_llm(prompt, system_prompt, ...) callable.

        Returns:
            True when new insights were written to disk, False otherwise.
        """
        print(f"\n[KNOWLEDGE] Assessing turns {start_idx+1}-{end_idx}...")
        # 1. Extract and structure the window.
        turn_data = TurnAnalyzer.format_turn_window(history, start_idx, end_idx)
        # 2. Quality gate: is this window worth an LLM call?
        should_update, reason = TurnAnalyzer.check_quality(turn_data)
        if not should_update:
            print(f"[KNOWLEDGE] Skipping update: {reason}")
            return False
        # 3. Load current knowledge and build the delta prompt.
        current_knowledge = self.load_knowledge()
        prompt = KnowledgeGenerator.build_prompt(turn_data, current_knowledge)
        # 4. Generate and merge the delta.
        print(f"[KNOWLEDGE] Generating insights (Reason: {reason})...")
        try:
            # Fixed seed keeps the analysis reproducible across runs.
            delta_response = llm_function(
                prompt=prompt,
                system_prompt=KnowledgeGenerator.KNOWLEDGE_DELTA_PROMPT,
                seed=42
            )
            if "NO_UPDATES" not in delta_response and len(delta_response) > 10:
                new_full_knowledge = SmartKnowledgeMerger.merge_delta(current_knowledge, delta_response)
                # Only touch the file when the merge actually changed something.
                if new_full_knowledge != current_knowledge:
                    with open(self.output_file, "w", encoding="utf-8") as f:
                        f.write(new_full_knowledge)
                    print(f"[KNOWLEDGE] Added new insights to database. (+{len(delta_response)} chars)")
                    return True
        except Exception as e:
            # Best-effort: a failed LLM call must never crash the agent loop.
            print(f"[KNOWLEDGE] Error during generation: {e}")
        print("[KNOWLEDGE] No new insights found.")
        return False
import re
class SmartKnowledgeMerger:
    """Merges an LLM 'delta' (new insights in markdown) into the existing
    knowledge base, de-duplicating against what is already recorded."""

    # Matches a whole markdown section-header line such as '## UNIVERSAL MECHANICS'.
    # BUGFIX: the previous pattern '## [A-Z :-_]+' contained an accidental
    # character range ':-_' and excluded '-' and lowercase words, so headers
    # like '## CROSS-EPISODE INSIGHTS' or '## STRATEGIC LESSONS and STRATEGIC
    # TIPS' were truncated mid-header.
    _HEADER_RE = re.compile(r'^(## .+)$', re.MULTILINE)

    @staticmethod
    def merge_delta(existing_content: str, delta_content: str) -> str:
        """Merge delta_content into existing_content and return the result.

        'NO_UPDATES' (or an empty delta) leaves the base untouched; an empty
        base is seeded with the standard title. Location sheets get field-level
        merging; every other section gets de-duplicated bullet appending.
        """
        if "NO_UPDATES" in delta_content or not delta_content.strip():
            return existing_content
        if not existing_content:
            return "# Zork Strategic Knowledge Base\n\n" + delta_content
        updated_content = existing_content
        # Split into alternating [text, header, body, header, body, ...] parts.
        sections = SmartKnowledgeMerger._HEADER_RE.split(delta_content)
        current_header = None
        for part in sections:
            part = part.strip()
            if not part:
                continue
            if part.startswith("##"):
                current_header = part.replace("## ", "")
            elif current_header:
                if "LOCATION" in current_header:
                    # BUGFIX: _merge_location_sheet was commented out while
                    # still being called here, raising AttributeError at
                    # runtime for every location sheet. Restored below.
                    updated_content = SmartKnowledgeMerger._merge_location_sheet(
                        updated_content, current_header, part
                    )
                else:
                    # Standard handling for UNIVERSAL MECHANICS, DANGERS, etc.
                    updated_content = SmartKnowledgeMerger._append_to_section(
                        updated_content, current_header, part
                    )
        return updated_content

    @staticmethod
    def _merge_location_sheet(full_text: str, location_header: str, new_sheet_data: str) -> str:
        """Merge a location sheet, updating '- **FIELD**:' lines in place.

        Prefixed field lines (e.g. '- **EXITS**:') overwrite their previous
        value; free-form knowledge bullets are appended when new.
        """
        pattern = rf"(## {re.escape(location_header)})(.*?)(?=\n## |$)"
        match = re.search(pattern, full_text, re.DOTALL)
        if not match:
            # Unknown location: simply append the whole sheet.
            return f"{full_text.strip()}\n\n## {location_header}\n{new_sheet_data}\n"
        existing_body = match.group(2)
        updated_body = existing_body
        for line in new_sheet_data.strip().split('\n'):
            line = line.strip()
            if not line:
                continue
            prefix_match = re.match(r"(- \*\*[A-Z ]+\*\*):", line)
            if prefix_match:
                prefix = prefix_match.group(1)
                if prefix in updated_body:
                    # Field already present: overwrite its line (item lists change).
                    # Callable replacement avoids backslash-escape surprises in `line`.
                    updated_body = re.sub(rf"{re.escape(prefix)}:.*", lambda m: line, updated_body)
                else:
                    updated_body += f"\n{line}"
            else:
                # Free-form knowledge bullet: append only when genuinely new.
                if line.lower() not in updated_body.lower():
                    updated_body += f"\n{line}"
        return full_text.replace(match.group(0), f"## {location_header}\n{updated_body}\n", 1)

    @staticmethod
    def _append_to_section(full_text: str, section_name: str, text_to_add: str) -> str:
        """Append new bullet lines to a section, skipping exact and near duplicates."""
        new_lines = [l.strip() for l in text_to_add.strip().split('\n')
                     if l.strip().startswith(('-', '*'))]
        pattern = rf"(## {re.escape(section_name)})(.*?)(?=\n## |$)"
        match = re.search(pattern, full_text, re.DOTALL)
        if not match:
            # Section absent: create it at the end of the document.
            return f"{full_text.strip()}\n\n## {section_name}\n{text_to_add}\n"
        header = match.group(1)
        existing_body = match.group(2).rstrip()
        # BUGFIX: all comparisons are now lowercase-vs-lowercase; previously a
        # lowercased needle was searched in the original-case haystack, so
        # duplicates that differed only in case slipped through.
        existing_body_lower = existing_body.lower()
        existing_lines_set = {l.strip() for l in existing_body.strip().split('\n') if l.strip()}
        existing_lines_lower = {l.lower() for l in existing_lines_set}
        filtered_new_lines = []
        for nl in new_lines:
            clean_nl = nl.lower()
            if clean_nl in existing_body_lower:
                continue
            # Fuzzy de-duplication on significant (length > 4) words.
            important_words = {w for w in clean_nl.split() if len(w) > 4}
            is_redundant = False
            for existing_line in existing_body_lower.split('\n'):
                words_in_existing = {w for w in existing_line.split() if len(w) > 4}
                # Redundant when >70% of the significant words already appear in one line.
                if important_words and len(important_words & words_in_existing) / len(important_words) > 0.7:
                    is_redundant = True
                    break
            if nl not in existing_lines_set and clean_nl not in existing_lines_lower and not is_redundant:
                filtered_new_lines.append(nl)
                # Track within-delta duplicates too.
                existing_lines_set.add(nl)
                existing_lines_lower.add(clean_nl)
        if not filtered_new_lines:
            return full_text  # nothing new, leave the document untouched
        updated_body = existing_body + "\n" + "\n".join(filtered_new_lines)
        return full_text.replace(match.group(0), f"{header}\n{updated_body}\n", 1)