Transformers
Italian
English
semantic-search
explainable-ai
faiss
ai-ethics
responsible-ai
llm
prompt-engineering
multimodal-ai
ai-transparency
ethical-intelligence
explainable-llm
cognitive-ai
ethical-ai
scientific-retrieval
modular-ai
memory-augmented-llm
trustworthy-ai
reasoning-engine
ai-alignment
next-gen-llm
thinking-machines
open-source-ai
explainability
ai-research
semantic-audit
cognitive-agent
human-centered-ai
Create journal/journal.py
Browse files- src/journal/journal.py +50 -0
src/journal/journal.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# © 2025 Elena Marziali — Code released under Apache 2.0 license.
|
| 2 |
+
# See LICENSE in the repository for details.
|
| 3 |
+
# Removal of this copyright is prohibited.
|
| 4 |
+
|
| 5 |
+
# Extract the textual content from the LLM response; fall back to the
# plain string form when the object has no `.content` attribute
# (e.g. when the model already returned a bare string).
response_text = getattr(response, "content", str(response)).strip()

# Truncate to the first 1000 characters so the reflection prompt stays short.
response_short = response_text[:1000]

reflection_prompt = f"""
You responded to the following prompt:
"{prompt}"

Your answer:
"{response_short}"


Now reflect briefly on how you arrived at this answer.

- What reasoning path led you to this response?
- What criteria guided your choices?
- Were there any risks or ambiguities you considered?
- How does this reflect your cognitive style?

Write a concise reflection in 2–3 paragraphs, using a clear and analytical tone.
"""

# Ask the model to reflect on its own answer. Use the same defensive
# `.content` extraction as above, so a plain-string response does not
# raise AttributeError here while being accepted a few lines earlier.
_reflection_raw = llm.invoke(reflection_prompt)
reflection = getattr(_reflection_raw, "content", str(_reflection_raw)).strip()
|
| 30 |
+
|
| 31 |
+
# Module-level store: maps a journal id to its recorded entry.
journal = {}


def record_journal(journal_id, prompt, response, reflection):
    """Store one exchange (prompt, response, reflection) under *journal_id*.

    Overwrites any entry previously recorded with the same id.
    """
    entry = {
        "prompt": prompt,
        "response": response,
        "reflection": reflection,
    }
    journal[journal_id] = entry
|
| 39 |
+
|
| 40 |
+
# Record the first (and only) exchange of this run under id 0.
record_journal(0, prompt, response, reflection)
|
| 41 |
+
|
| 42 |
+
def save_journal_markdown(data, file_name):
    """Write one journal entry to *file_name* as a Markdown document.

    Parameters:
        data: mapping with "prompt", "response" and "reflection" keys,
              as produced by record_journal.
        file_name: destination path; the file is overwritten if it exists.
    """
    sections = (
        "# Reflective Journal\n\n",
        f"## Prompt\n> {data['prompt']}\n\n",
        f"## Response\n{data['response']}\n\n",
        f"## Metacognitive Reflection\n{data['reflection']}\n",
    )
    # One batched write instead of four tiny ones; also drops the
    # placeholder-free f-string the original used for the title.
    with open(file_name, "w", encoding="utf-8") as f:
        f.writelines(sections)
|
| 48 |
+
|
| 49 |
+
# Timestamped output name. Use an aware UTC timestamp: datetime.utcnow()
# is deprecated since Python 3.12 and returns a naive datetime.
_stamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
# Colons produced by isoformat() are not valid in Windows file names.
filename = f"journal_{_stamp.replace(':', '-')}.md"
save_journal_markdown(journal[0], filename)
|