Transformers
Italian
English
semantic-search
explainable-ai
faiss
ai-ethics
responsible-ai
llm
prompt-engineering
multimodal-ai
ai-transparency
ethical-intelligence
explainable-llm
cognitive-ai
ethical-ai
scientific-retrieval
modular-ai
memory-augmented-llm
trustworthy-ai
reasoning-engine
ai-alignment
next-gen-llm
thinking-machines
open-source-ai
explainability
ai-research
semantic-audit
cognitive-agent
human-centered-ai
File size: 7,865 Bytes
d0d31ed |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 |
# © 2025 Elena Marziali — Code released under Apache 2.0 license.
# See LICENSE in the repository for details.
# Removal of this copyright is prohibited.
# === metacognitive_cycle (defined below) ===
# Runs an iterative evaluate-and-improve cycle over a generated response.
# Combines qualitative feedback with a semantic coherence score to decide
# whether to reformulate. Useful for simulating reflective, adaptive behavior.
def generate_objective_from_input(user_input):
    """Derive a high-level operational objective from the user's input.

    Sends a short planning prompt to the LLM and returns its answer as a
    stripped string. On any invocation failure the error is logged and a
    fixed fallback message is returned instead of raising.
    """
    objective_prompt = (
        "You are an autonomous scientific agent. Based on the following input:\n"
        f'"{user_input}"\n'
        "Define a clear and actionable objective that guides the agent's next steps."
    )
    try:
        raw = llm.invoke(objective_prompt.strip())
    except Exception as exc:
        logging.error(f"Error generating objective: {exc}")
        return "Objective generation failed."
    # LLM client may return an object with .content or a plain string.
    return getattr(raw, "content", str(raw)).strip()
def metacognitive_cycle(question, level, max_iter=2):
    """Iteratively evaluate and refine an LLM answer to *question*.

    After the initial generation, up to *max_iter* rounds of feedback and
    coherence scoring are run; the answer is rewritten while its coherence
    score stays below 0.7, and the loop exits early once it passes.
    """
    current = extract_text_from_ai(llm.invoke(question))
    for round_no in range(1, max_iter + 1):
        feedback = auto_feedback_response(question, current, level)
        coherence = evaluate_coherence(question, current)
        print(f"\nIteration {round_no} – Coherence: {coherence:.3f}")
        print("Feedback:", extract_text_from_ai(feedback))
        if coherence >= 0.7:
            break
        current = extract_text_from_ai(improve_response(question, current, level))
    return current
# Self-assessment with interactive improvement: samples several candidate
# responses, keeps the most coherent one, and retries with a reformulated
# question when confidence is too low.
def evaluate_responses_with_ai(question, generate_response_fn, n_variants=3, reformulation_threshold=0.6, max_retries=3):
    """Sample several answers at different temperatures and keep the best.

    Generates up to *n_variants* responses (temperatures 0.7, 0.4, 0.9),
    scores each for semantic coherence, and returns the highest-scoring
    one. If the best score is below *reformulation_threshold*, the question
    is reformulated and the process retried, at most *max_retries* times.

    Args:
        question: the user question to answer.
        generate_response_fn: callable(question, temperature=...) -> str.
        n_variants: number of candidate responses to sample (max 3).
        reformulation_threshold: minimum acceptable coherence score.
        max_retries: cap on reformulation retries (new, default keeps
            behavior backward-compatible while preventing runaway recursion).

    Returns:
        dict with keys "response", "confidence" (rounded to 3 decimals),
        and "note".
    """
    temperatures = [0.7, 0.4, 0.9][:n_variants]
    responses = [generate_response_fn(question, temperature=t) for t in temperatures]
    scores = [evaluate_coherence(question, r) for r in responses]
    best_idx = max(range(len(scores)), key=scores.__getitem__)
    confidence = scores[best_idx]
    if confidence < reformulation_threshold and max_retries > 0:
        # BUG FIX: the original recursion dropped caller-supplied n_variants /
        # reformulation_threshold (falling back to defaults) and had no depth
        # bound, so a persistently low score recursed forever.
        return evaluate_responses_with_ai(
            reformulate_question(question),
            generate_response_fn,
            n_variants=n_variants,
            reformulation_threshold=reformulation_threshold,
            max_retries=max_retries - 1,
        )
    return {
        "response": responses[best_idx],
        "confidence": round(confidence, 3),
        "note": generate_note(confidence),
    }
def evaluate_responses_with_ai_simple(question, response, level="basic"):
    """Evaluate the quality of *response* relative to *question* via the LLM.

    Returns a dict with keys:
        response             -- the evaluated response, echoed back
        semantic_score       -- coherence score parsed from the LLM output
                                (0.0 when parsing or invocation fails)
        weakness_reason      -- explanation of weakness, if any
        new_formulation      -- suggested reformulation, or None
        self_reflection      -- reflection on the reasoning, or None
        requires_improvement -- True when semantic_score < 0.7
    """
    evaluation_prompt = f"""
User question: "{question}"
Generated response: "{response}"
Required level: {level}
Evaluate the response in 5 points:
1. Semantic coherence (0–1)
2. Conceptual completeness
3. Argumentative structure
4. Adequacy to the required level
5. Ability to stimulate new questions
If the response is weak:
- Explain the reason
- Suggest a reformulation
- Reflect on how the system reasoned
Return everything in structured format.
"""
    try:
        ai_evaluation = llm.invoke(evaluation_prompt)
        raw_output = getattr(ai_evaluation, "content", str(ai_evaluation))
    except Exception as e:
        print("Evaluation error:", e)
        # BUG FIX: include "response" so the error dict has the same shape
        # as the success dict returned below.
        return {
            "response": response,
            "semantic_score": 0.0,
            "weakness_reason": "System error",
            "new_formulation": None,
            "self_reflection": None,
            "requires_improvement": True,
        }

    # Simplified parsers over the LLM's free-form output (could be replaced
    # with structured output / JSON mode).
    def extract_score(text):
        # NOTE(review): this pattern only matches scores of the form 0.x;
        # a perfect "1" or "1.0" would parse as 0.0 — confirm intended.
        match = re.search(r"Semantic coherence\s*[:\-]?\s*(0\.\d+)", text)
        return float(match.group(1)) if match else 0.0

    def extract_field(text, label, default):
        # Grab the remainder of the line after "<label>:" (or "<label> -").
        match = re.search(rf"{label}\s*[:\-]?\s*(.+)", text)
        return match.group(1).strip() if match else default

    score = extract_score(raw_output)
    return {
        "response": response,
        "semantic_score": score,
        "weakness_reason": extract_field(raw_output, "Reason", "Reason not found."),
        "new_formulation": extract_field(raw_output, "Reformulation", None),
        "self_reflection": extract_field(raw_output, "Reflection", None),
        "requires_improvement": score < 0.7,
    }
def generate_metacognitive_content(question, response, evaluation):
    """Render a Q/A pair plus its metacognitive evaluation as labeled lines.

    *evaluation* must carry the keys produced by the evaluator:
    semantic_score, weakness_reason, new_formulation, self_reflection,
    requires_improvement.
    """
    lines = [
        f"[Question] {question}",
        f"[Response] {response}",
        f"[Coherence Score] {evaluation['semantic_score']}",
        f"[Weakness Reason] {evaluation['weakness_reason']}",
        f"[Suggested Reformulation] {evaluation['new_formulation']}",
        f"[Cognitive Reflection] {evaluation['self_reflection']}",
        f"[Needs Improvement] {evaluation['requires_improvement']}",
    ]
    return "\n".join(lines)
def add_metacognitive_memory(question, response):
    """Evaluate a Q/A pair, embed the evaluation, and persist it to memory.

    Side effects: appends one vector to the FAISS index, records the
    textual content in meta_diary.json under the new vector's id, and
    re-pickles the index to INDEX_FILE.
    """
    # BUG FIX: the original called evaluate_responses_with_ai(question, response),
    # but that function expects a *callable* second argument and returns
    # {"response", "confidence", "note"} — while generate_metacognitive_content
    # reads semantic_score/weakness_reason/new_formulation/self_reflection/
    # requires_improvement, i.e. the dict produced by the *_simple evaluator.
    evaluation = evaluate_responses_with_ai_simple(question, response)
    # Generate textual content with all metacognitive data
    textual_content = generate_metacognitive_content(question, response, evaluation)
    # Generate semantic embedding from the full content
    embedding = embedding_model.encode([textual_content])
    # Add to FAISS index
    index.add(np.array(embedding, dtype=np.float32))
    # BUG FIX: search_similar_reasoning() resolves FAISS ids via
    # meta_diary.json, but nothing ever wrote that file, so retrieval could
    # never return content. Store the text under the new vector's id.
    try:
        with open("meta_diary.json", "r", encoding="utf-8") as f:
            archive = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        archive = {}
    archive[str(index.ntotal - 1)] = textual_content
    with open("meta_diary.json", "w", encoding="utf-8") as f:
        json.dump(archive, f, ensure_ascii=False, indent=2)
    # Save updated index.
    # NOTE(review): pickling a FAISS index only works for some index types;
    # faiss.write_index() is the supported serialization — confirm.
    with open(INDEX_FILE, "wb") as f:
        pickle.dump(index, f)
    print("Metacognitive memory updated!")
def search_similar_reasoning(query, top_k=5):
    """Return up to *top_k* stored reasoning texts most similar to *query*.

    Encodes the query, runs a FAISS nearest-neighbour search, and resolves
    the returned vector ids to their textual contents in meta_diary.json.
    An unreadable archive yields an empty list; missing ids are skipped.
    """
    # Encode the query and search for the top-K nearest vectors.
    query_vector = embedding_model.encode([query])
    distances, indices = index.search(np.array(query_vector, dtype=np.float32), top_k)
    # PERF FIX: the original re-opened and re-parsed meta_diary.json once
    # per hit inside the loop; read it a single time instead.
    try:
        with open("meta_diary.json", "r", encoding="utf-8") as f:
            archive = json.load(f)
    except Exception as e:
        print(f"Memory retrieval error: {e}")
        return []
    results = []
    for idx in indices[0]:
        # BUG FIX: FAISS pads with -1 when the index holds fewer than
        # top_k vectors; skip those placeholder ids.
        if idx < 0:
            continue
        content = archive.get(str(idx))
        if content:
            results.append(content)
    return results
def add_metacognition_to_response(response, evaluation):
    """Append the evaluation's weakness note and reflection to *response*.

    Missing evaluation keys degrade gracefully to empty annotations.
    """
    note = evaluation.get("weakness_reason", "")
    reflection = evaluation.get("self_reflection", "")
    annotated = [
        response.strip(),
        "",
        f"*Metacognitive note:* {note}",
        f"*Agent's reflection:* {reflection}",
    ]
    return "\n".join(annotated)
def auto_feedback(question, response, level):
    """Return a prompt asking the LLM to critique a response to *question*.

    NOTE(review): *response* is accepted but never interpolated into the
    prompt — the evaluator only sees the question and level. Confirm
    whether the response text should be embedded here.
    """
    return (
        f'Analyze the response in relation to the question: "{question}".\n'
        f"Evaluate the content according to the level '{level}' and suggest improvements.\n"
    )
# === Full flow example ===
async def scientific_creativity_flow(concept, subject, language="en", level="advanced"):
    """End-to-end creativity flow: hypothesize, retrieve, score novelty.

    Generates a creative hypothesis for *concept*, gathers existing
    scientific sources for comparison, evaluates how novel the hypothesis
    is against them, and returns both pieces.
    """
    hypothesis = simulate_scientific_creativity(concept, subject, language=language, level=level)
    # Retrieve existing scientific sources for the novelty comparison.
    articles, _ = await search_multi_database(concept)
    return {
        "hypothesis": hypothesis,
        "novelty": evaluate_hypothesis_novelty(hypothesis, articles),
    }