| import torch |
| from typing import Dict |
|
|
| from .llm_iface import LLM |
| from .prompts import INTROSPECTION_PROMPTS |
| from .utils import dbg |
|
|
@torch.no_grad()
def generate_introspective_report(
    llm: LLM,
    context_prompt_type: str,
    introspection_prompt_type: str,
    num_steps: int,
    temperature: float = 0.5
) -> str:
    """Generate an introspective self-report about a previously induced cognitive state.

    Args:
        llm: Wrapped language model used for free-form text generation.
        context_prompt_type: Label of the prompt that induced the state
            (used for logging only, not for generation).
        introspection_prompt_type: Key into ``INTROSPECTION_PROMPTS`` selecting
            the report template.
        num_steps: Substituted into the template's ``{num_steps}`` placeholder.
        temperature: Sampling temperature passed through to generation.

    Returns:
        The generated introspective report text.

    Raises:
        ValueError: If ``introspection_prompt_type`` is unknown, or if the
            generated report is empty/too short to be a plausible self-report.
    """
    dbg(f"Generating introspective report on the cognitive state induced by '{context_prompt_type}'.")

    prompt_template = INTROSPECTION_PROMPTS.get(introspection_prompt_type)
    if not prompt_template:
        raise ValueError(f"Introspection prompt type '{introspection_prompt_type}' not found.")

    prompt = prompt_template.format(num_steps=num_steps)

    report = llm.generate_text(prompt, max_new_tokens=256, temperature=temperature)

    dbg(f"Generated Introspective Report: '{report}'")
    # Explicit raise instead of `assert`: assertions are stripped under
    # `python -O`, which would silently disable this sanity check.
    if not isinstance(report, str) or len(report) <= 10:
        raise ValueError("Introspective report seems too short or invalid.")

    return report
|
|