"""
adaptive_knowledge.py
Système complet de gestion des connaissances adaptatives pour l'Agent ReAct.
Gère l'extraction, l'analyse, la qualité et la génération de la base de connaissances.
"""
| |
|
| | import os |
| | import re |
| | import json |
| | from typing import Dict, List, Optional, Tuple |
| |
|
| | |
| | |
| | |
class SectionUtils:
    """Markdown section helpers for the strategic knowledge-base file."""

    @staticmethod
    def extract_section_content(content: str, section_name: str) -> str:
        """Return the body of the first '## ...' header whose title contains
        section_name (case-insensitive, fuzzy substring match).

        Returns "" when content is empty or the section is not found.
        """
        if not content:
            return ""

        target = section_name.strip().upper()

        section_data = []
        found = False
        for line in content.split('\n'):
            clean_line = line.strip().upper()

            # Fuzzy match: the target only needs to appear somewhere in the
            # header line (e.g. "STRATEGIC LESSONS" matches
            # "## STRATEGIC LESSONS and STRATEGIC TIPS").
            if clean_line.startswith("##") and target in clean_line:
                found = True
                continue

            if found:
                if clean_line.startswith("##"):  # next section starts
                    break
                section_data.append(line)

        return "\n".join(section_data).strip() if found else ""

    @staticmethod
    def update_section_content(content: str, section_name: str, new_content: str) -> str:
        """Replace the body of '## section_name' in content, or append the
        section at the end when it does not exist yet.

        Creates a fresh knowledge-base skeleton when content is empty.
        """
        if not content:
            content = "# Zork Strategic Knowledge Base\n\n"
        section_header = f"## {section_name}"
        # Anchor the header to a full line: the original pattern
        # "## NAME(.*?)" matched by prefix, so updating "DANGER" clobbered a
        # longer header such as "## DANGER ANALYSIS".
        pattern = rf"^## {re.escape(section_name)}[ \t]*$(.*?)(?=\n## |\Z)"
        match = re.search(pattern, content, re.DOTALL | re.MULTILINE)

        full_new_section = f"{section_header}\n\n{new_content}\n"

        if match:
            # Splice by match span instead of str.replace so an earlier
            # identical substring elsewhere in the document is never touched.
            return content[:match.start()] + full_new_section + content[match.end():]
        return f"{content}\n\n{full_new_section}\n"

    @staticmethod
    def extract_cross_episode_section(content: str) -> str:
        """Extract the 'Wisdom' section that must persist across episodes."""
        return SectionUtils.extract_section_content(content, "CROSS-EPISODE INSIGHTS")
| |
|
| | |
| | |
| | |
class TurnAnalyzer:
    """Turns the agent's raw step history into structured, analyzable windows."""

    @staticmethod
    def format_turn_window(history: list, start_index: int, end_index: int) -> Dict:
        """Convert history[start_index:end_index] into a structured summary.

        Each history step is expected to be a dict with (at least) the keys
        'thought', 'tool', 'args', 'result' and optionally 'score'/'location'
        — TODO confirm 'location' is populated by the agent.
        """
        window = history[start_index:end_index]

        turn_data = {
            "start_turn": start_index + 1,  # turns are 1-based for display
            "end_turn": end_index,
            "actions": [],
            "death_events": [],
            "score_changes": [],
        }

        # Baseline score: last step before the window, or 0 at game start.
        prev_score = 0
        if start_index > 0:
            prev_score = history[start_index - 1].get("score", 0)

        for i, step in enumerate(window):
            turn_num = start_index + 1 + i
            action_repr = step.get("tool", "") + str(step.get("args", ""))

            turn_data["actions"].append({
                "turn": turn_num,
                "thought": step.get("thought", ""),
                "action": action_repr,
                "result": step.get("result", ""),
            })

            current_score = step.get("score", 0)
            if current_score != prev_score:
                turn_data["score_changes"].append({
                    # BUG FIX: original referenced an undefined name
                    # `current_location` (NameError on every score change);
                    # read the location from the step itself instead.
                    "location": step.get("location", "Unknown"),
                    "action": action_repr,
                    "explanation": step.get("result", ""),
                    "turn": turn_num,
                    "from": prev_score,
                    "to": current_score,
                })
                prev_score = current_score

            # Crude death detection based on the game's result text.
            result_lower = step.get("result", "").lower()
            if "game over" in result_lower or "died" in result_lower:
                turn_data["death_events"].append({
                    "turn": turn_num,
                    "reason": step.get("result", "")[:100],
                    "context": step.get("thought", ""),
                })

        return turn_data

    @staticmethod
    def check_quality(turn_data: Dict) -> Tuple[bool, str]:
        """Decide whether this window is worth sending to the LLM analyst.

        Returns (should_analyze, human-readable reason).
        """
        actions = turn_data["actions"]

        # Deaths and score changes are always informative.
        if turn_data["death_events"]:
            return True, "Death event detected"
        if turn_data["score_changes"]:
            return True, "Score progression detected"

        if len(actions) < 2:
            return False, "Too few actions"

        # Reject windows where the agent mostly repeated the same action.
        action_strs = [a["action"] for a in actions]
        variety_ratio = len(set(action_strs)) / len(actions)
        if variety_ratio < 0.2:
            return False, f"Low variety ({variety_ratio:.1%}) - repetitive behavior"

        return True, "Sufficient quality data"
| |
|
| | |
| | |
| | |
class KnowledgeGenerator:
    """Prompt templates and prompt assembly for the knowledge-extraction LLM.

    Only the numbered lists in the prompts were repaired (they skipped
    numbers, which can confuse instruction-following); all other prompt
    text is unchanged.
    """

    # Static world rules always injected into the agent's strategic context.
    GLOBAL_RULES = """
- Player is a pragmatic adventurer.
- Movement: use cardinal directions (n, s, e, w, u, d).
- Interaction: You must 'examine' objects to find details.
- Combat: player prefers to 'hit' or 'smash' threats.
"""

    # System prompt for the full knowledge-base rewrite flow.
    SYSTEM_PROMPT = """

You are the Strategic Analyst for a text game Agent.
Your goal is to update the 'Strategic Knowledge Base' based on recent gameplay logs.

ARCHITECTURAL RULES:
1. **Universal Scope**: Focus on mechanics that apply ANYWHERE (e.g., "Darkness kills without light").
2. **Avoid Specifics**: Do NOT write "Go North from House". Write "Explore cardinal directions systematically".
3. **Analyze Failure**: If the agent died, identify the CAUSE and the PRINCIPLE to avoid it.

OUTPUT SECTIONS REQUIRED:
- **UNIVERSAL GAME MECHANICS**: Rules of physics/parser (e.g., "Containers must be opened").
- **DANGER CATEGORIES**: Types of threats (e.g., "Trolls require weapons").
- **STRATEGIC PRINCIPLES**: Heuristics for decision making.
- **DEATH & DANGER ANALYSIS**: Specific analysis of recent deaths.

Preserve existing 'CROSS-EPISODE INSIGHTS' if provided.
"""

    # System prompt for the incremental (delta) update flow.
    # NOTE: instruction list renumbered (was 1,2,4,5,6,7 in the original).
    KNOWLEDGE_DELTA_PROMPT = """You are the Knowledge Analyst for a text game Agent.
Your goal is to extract strictly **ABSOLUTE RULES** and **STRATEGIC LESSONS**.

Do NOT:
- Do not list exits or room descriptions (this is handled by Memory).
- Do not list items in the room.

DO IDENTIFY:
- Universal Mechanics: "Light is required in dark places", "Glass breaks when hit".
- Danger Patterns: "Falling from heights is fatal", "Water traps require swimming".
- High-Level Strategies: "Examine objects twice", "Save heavy items for later".

INPUTS:
1. EXISTING KNOWLEDGE: What we already know (do NOT repeat this).
2. NEW LOGS: Recent gameplay history and ON CURRENT LOCATION

INSTRUCTIONS:
1. Compare the Logs against Existing Knowledge.
2. Identify **ONLY** new information:
   - New mechanics discovered.
   - New map connections or object interactions.
3. **SILENCE**: If nothing new happened, output "NO_UPDATES".
4. ONLY output insights that are NOT already in the EXISTING KNOWLEDGE.
5. Be concise: One bullet point per new discovery.
6. DO NOT repeat the headers if you have no data for them.

CRITICAL CONSTRAINTS:
1. NO SURVIVAL TALK: GAME is not about food, water, or fatigue. Ignore these.
2. NO REPETITION: If an idea is already in 'EXISTING KNOWLEDGE', skip it.
3. BE TECHNICAL: Focus on "Item X opens Door Y" or "Light is needed in Dark rooms".
4. NO SPECULATION: Do not invent dangers (like hostile pigs) unless the logs explicitly show the agent taking damage or dying.


OUTPUT FORMAT (Markdown):
Only output the sections that have NEW content. Do not output empty sections.

## UNIVERSAL MECHANICS (New technical rules only)
- [New rule found]

## STRATEGIC LESSONS and STRATEGIC TIPS (New navigation/interaction heuristics)
- [Strategy]
- [New strategy found]

## DANGER ANALYSIS
- [Analysis of what killed player or blocked him]

## SCORE EVENTS
- [Action] in [Location] increased score because [Reason].

"""

    @staticmethod
    def build_prompt(turn_data: Dict, existing_knowledge: str) -> str:
        """Assemble the user prompt from a structured turn window and the
        current knowledge base (see TurnAnalyzer.format_turn_window for the
        expected turn_data shape).
        """
        logs_str = ""
        for a in turn_data["actions"]:
            logs_str += f"Turn {a['turn']}:\n"
            logs_str += f"  Thought: {a['thought']}\n"
            logs_str += f"  Action: {a['action']}\n"
            # Results are truncated to keep the prompt bounded.
            logs_str += f"  Result: {a['result'][:400]}...\n\n"

        # Headline counters so the analyst sees notable events up front.
        events_str = ""
        if turn_data["death_events"]:
            events_str += f"*** DEATHS: {len(turn_data['death_events'])} ***\n"
        if turn_data["score_changes"]:
            events_str += f"*** SCORE CHANGES: {len(turn_data['score_changes'])} ***\n"

        # NOTE: instruction list renumbered (was 1,3,4 in the original).
        return f"""
ANALYZE THIS GAMEPLAY WINDOW (Turns {turn_data['start_turn']}-{turn_data['end_turn']}):

EVENTS:
{events_str}

LOGS:
{logs_str}

EXISTING KNOWLEDGE BASE:
------------------------
{existing_knowledge}
------------------------

INSTRUCTIONS:
Update the Knowledge Base.
1. Incorporate lessons from the logs above.
2. Remove duplicates.
3. Output ONLY the NEW additions in Markdown format.
"""
| |
|
| | |
| | |
| | |
class AdaptiveKnowledgeManager:
    """Orchestrates loading, updating and persisting the strategic knowledge base."""

    def __init__(self, output_file: str = "knowledgebase.md"):
        # Path of the markdown knowledge-base file on disk.
        self.output_file = output_file

    def get_strategic_knowledge(self) -> str:
        """Build the strategic context block injected into the agent's prompt.

        Returns an empty string when no knowledge base exists yet.
        (Original signature had a bogus annotation `self: str` — removed.)
        """
        full_kb = self.load_knowledge()
        if not full_kb:
            return ""

        universal = SectionUtils.extract_section_content(full_kb, "UNIVERSAL MECHANICS")
        lessons = SectionUtils.extract_section_content(full_kb, "STRATEGIC LESSONS")
        dangers = SectionUtils.extract_section_content(full_kb, "DANGER ANALYSIS")
        # NOTE(review): the "SCORE EVENTS" section produced by the delta prompt
        # was extracted here but never used — confirm whether it should be
        # surfaced in the agent context as well.

        return f"""
### 📜 WORLD RULES
{KnowledgeGenerator.GLOBAL_RULES}

### 🧠 EVOLVED STRATEGIES
{universal if universal else ""}
{lessons if lessons else ""}

### ⚠️ LETHAL LESSONS
{dangers if dangers else "No fatal errors recorded yet."}
"""

    def load_knowledge(self) -> str:
        """Return the raw knowledge-base file contents, or "" if it does not exist."""
        if os.path.exists(self.output_file):
            with open(self.output_file, "r", encoding="utf-8") as f:
                return f.read()
        return ""

    def update_knowledge(self, history: list, start_idx: int, end_idx: int, llm_function) -> bool:
        """Run one knowledge-update cycle over history[start_idx:end_idx].

        Args:
            history: The agent's self.history list of step dicts.
            start_idx: Start of the analysis window.
            end_idx: End of the analysis window (exclusive).
            llm_function: The agent's call_llm(prompt, system_prompt) callable.

        Returns:
            True when new insights were written to disk, False otherwise.
        """
        print(f"\n[KNOWLEDGE] Assessing turns {start_idx+1}-{end_idx}...")

        turn_data = TurnAnalyzer.format_turn_window(history, start_idx, end_idx)

        # Skip windows that are too poor to be worth an LLM call.
        should_update, reason = TurnAnalyzer.check_quality(turn_data)
        if not should_update:
            print(f"[KNOWLEDGE] Skipping update: {reason}")
            return False

        current_knowledge = self.load_knowledge()
        prompt = KnowledgeGenerator.build_prompt(turn_data, current_knowledge)

        print(f"[KNOWLEDGE] Generating insights (Reason: {reason})...")
        try:
            delta_response = llm_function(
                prompt=prompt,
                system_prompt=KnowledgeGenerator.KNOWLEDGE_DELTA_PROMPT,
                seed=42,  # fixed seed for reproducible generations
            )

            # A very short answer is treated as "nothing useful".
            if "NO_UPDATES" not in delta_response and len(delta_response) > 10:
                new_full_knowledge = SmartKnowledgeMerger.merge_delta(current_knowledge, delta_response)

                # Only hit the disk when the merge actually changed something.
                if new_full_knowledge != current_knowledge:
                    with open(self.output_file, "w", encoding="utf-8") as f:
                        f.write(new_full_knowledge)
                    print(f"[KNOWLEDGE] Added new insights to database. (+{len(delta_response)} chars)")
                    return True
        except Exception as e:
            # Best-effort: a failed LLM call must never crash the agent loop.
            print(f"[KNOWLEDGE] Error during generation: {e}")

        print("[KNOWLEDGE] No new insights found.")
        return False
| |
|
| |
|
| | import re |
| |
|
| | class SmartKnowledgeMerger: |
| | @staticmethod |
| | def merge_delta(existing_content: str, delta_content: str) -> str: |
| | if "NO_UPDATES" in delta_content or not delta_content.strip(): |
| | return existing_content |
| |
|
| | if not existing_content: |
| | return "# Zork Strategic Knowledge Base\n\n" + delta_content |
| |
|
| | updated_content = existing_content |
| | |
| | |
| | sections = re.split(r'(## [A-Z :-_]+)', delta_content) |
| | |
| | current_header = None |
| | for part in sections: |
| | part = part.strip() |
| | if not part: continue |
| | |
| | if part.startswith("##"): |
| | current_header = part.replace("## ", "") |
| | elif current_header: |
| | |
| | if "LOCATION" in current_header: |
| | updated_content = SmartKnowledgeMerger._merge_location_sheet( |
| | updated_content, current_header, part |
| | ) |
| | else: |
| | |
| | updated_content = SmartKnowledgeMerger._append_to_section( |
| | updated_content, current_header, part |
| | ) |
| | |
| | return updated_content |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| |
|
| | @staticmethod |
| | def _append_to_section(full_text: str, section_name: str, text_to_add: str) -> str: |
| | """Ajoute du texte à la fin d'une section existante.""" |
| | new_lines = [l.strip() for l in text_to_add.strip().split('\n') if l.strip().startswith(('-', '*'))] |
| |
|
| | |
| | pattern = rf"(## {re.escape(section_name)})(.*?)(?=\n## |$)" |
| | match = re.search(pattern, full_text, re.DOTALL) |
| | |
| | if match: |
| | |
| | header = match.group(1) |
| | existing_body = match.group(2).rstrip() |
| |
|
| | existing_lines_set = set([l.strip() for l in existing_body.strip().split('\n') if l.strip()]) |
| |
|
| | |
| | |
| | filtered_new_lines = [] |
| | for nl in new_lines: |
| | clean_nl = nl.lower() |
| | if clean_nl in existing_body: |
| | continue |
| |
|
| | important_words = set([w for w in clean_nl.split() if len(w) > 4]) |
| | is_redundant = False |
| | for existing_line in existing_body.split('\n'): |
| | words_in_existing = set([w for w in existing_line.split() if len(w) > 4]) |
| | |
| | if important_words and len(important_words & words_in_existing) / len(important_words) > 0.7: |
| | is_redundant = True |
| | break |
| | |
| | if nl not in existing_lines_set and nl.lower() not in [el.lower() for el in existing_lines_set] and not is_redundant: |
| | filtered_new_lines.append(nl) |
| | existing_lines_set.add(nl) |
| |
|
| | if not filtered_new_lines: |
| | return full_text |
| | |
| | |
| | updated_body = existing_body.rstrip() + "\n" + "\n".join(filtered_new_lines) |
| | return full_text.replace(match.group(0), f"{header}\n{updated_body}\n", 1) |
| | else: |
| | |
| | return f"{full_text.strip()}\n\n## {section_name}\n{text_to_add}\n" |