File size: 1,122 Bytes
3a93742
ca69070
3a93742
 
 
 
 
 
ca69070
 
 
 
 
3a93742
ca69070
 
3a93742
ca69070
 
 
 
 
3a93742
 
ca69070
 
 
 
3a93742
 
ca69070
 
 
 
3a93742
 
ca69070
 
 
 
 
3a93742
ca69070
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import json
import os
from datetime import datetime, timezone

from rag.llm import generate

LOG_PATH = "logs/metrics.jsonl"


def ensure_log_dir():
    """Ensure the directory that holds the metrics log file exists."""
    log_dir = os.path.dirname(LOG_PATH)
    os.makedirs(log_dir, exist_ok=True)


def evaluate_and_log(question, context, answer):
    """Ask the LLM to grade *answer* against *context*, log the result, and return it.

    Args:
        question: the user question (stored in the log record; note it is
            not included in the evaluation prompt itself).
        context: retrieved context the answer is judged against.
        answer: the generated answer being evaluated.

    Returns:
        The evaluation text produced by the LLM, or an
        "Evaluation failed: ..." message if the LLM call raised.
    """
    ensure_log_dir()

    prompt = (
        "Evaluate the answer based on faithfulness, clarity, usefulness and completeness. "
        "Provide a short justification.\n\n"
        f"Context:\n{context}\n\n"
        f"Answer:\n{answer}\n\n"
        "Evaluation:"
    )

    # Best-effort: an evaluator failure must not break the caller's flow,
    # so the error text is recorded in place of the evaluation.
    try:
        evaluation = generate(prompt, max_tokens=200)
    except Exception as e:
        evaluation = f"Evaluation failed: {str(e)}"

    record = {
        # datetime.utcnow() is deprecated (Python 3.12) and returns a naive
        # datetime; use an explicitly UTC-aware timestamp instead.
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "question": question,
        "answer": answer,
        "evaluation": evaluation,
    }

    # Logging is also best-effort: a permission/disk error should not crash
    # the caller, only print a warning.
    try:
        with open(LOG_PATH, "a", encoding="utf-8") as f:
            f.write(json.dumps(record, ensure_ascii=False) + "\n")
    except Exception as e:
        print(f"Warning: Could not write to log file: {e}")

    return evaluation