Transformers
Italian
English
semantic-search
explainable-ai
faiss
ai-ethics
responsible-ai
llm
prompt-engineering
multimodal-ai
ai-transparency
ethical-intelligence
explainable-llm
cognitive-ai
ethical-ai
scientific-retrieval
modular-ai
memory-augmented-llm
trustworthy-ai
reasoning-engine
ai-alignment
next-gen-llm
thinking-machines
open-source-ai
explainability
ai-research
semantic-audit
cognitive-agent
human-centered-ai
Create metacognitive_reasoning.py
Browse files
src/reasoning/metacognitive_reasoning.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# © 2025 Elena Marziali — Code released under Apache 2.0 license.
|
| 2 |
+
# See LICENSE in the repository for details.
|
| 3 |
+
# Removal of this copyright is prohibited.
|
| 4 |
+
|
| 5 |
+
# This function simulates an intentional decision-making process by the AI agent.
# It analyzes the proposed action in relation to the goal, available alternatives, and context.
# Metacognition functions that adapt to the system
def execute_intentional_choice(action, goal, alternatives, context):
    """Run the agent's intentional-choice reasoning and record the decision.

    Delegates the actual reasoning to ``choice_with_intention`` (defined
    elsewhere in this module), extracts a plain-text explanation from its
    result, appends an audit entry to the module-level ``intentional_log``,
    and returns the explanation text.

    Args:
        action: The action the agent proposes to take.
        goal: The goal the action is meant to serve.
        alternatives: Alternative actions that were considered.
        context: Surrounding context handed to the reasoner.

    Returns:
        str: The stripped textual explanation of the choice.

    Side effects:
        Appends a dict with keys ``action``, ``reason``, ``impact`` and
        ``timestamp`` (ISO-8601, UTC, timezone-aware) to ``intentional_log``.
    """
    ai_explanation = choice_with_intention(action, goal, alternatives, context)
    # The reasoner may return a message object exposing .content, or a plain
    # string; handle both uniformly.
    explanation_content = getattr(ai_explanation, "content", str(ai_explanation)).strip()

    intentional_log.append({
        "action": action,
        "reason": explanation_content,
        "impact": f"Expected outcome for goal: {goal}",
        # Fix: datetime.utcnow() is deprecated (Python 3.12+) and yields a
        # naive datetime; use an explicit timezone-aware UTC timestamp.
        # Note the ISO string now carries a "+00:00" offset suffix.
        "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat()
    })

    return explanation_content
|
| 21 |
+
# Generates a response with intentionality by combining reasoning, AI response, and extracted text
|
| 22 |
+
def generate_response_with_intention(prompt, action, goal, alternatives, context):
|
| 23 |
+
reasoning = execute_intentional_choice(action, goal, alternatives, context)
|
| 24 |
+
ai_response = llm.invoke(prompt)
|
| 25 |
+
response_text = getattr(ai_response, "content", str(ai_response)).strip()
|
| 26 |
+
|
| 27 |
+
return f"{response_text}\n\n*Agent's intentional explanation:*\n{reasoning}"
|