LorenzoBioinfo committed on
Commit
38e243c
·
1 Parent(s): f90bafa

Add monitoring

Browse files
src/monitoring.py CHANGED
@@ -43,11 +43,6 @@ def retrain_on_youtube_sample():
43
 
44
 
45
 
46
-
47
-
48
-
49
-
50
-
51
  def main():
52
  print("Caricamento del modello")
53
  model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
 
43
 
44
 
45
 
 
 
 
 
 
46
  def main():
47
  print("Caricamento del modello")
48
  model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
tests/integration/test_monitoring.py CHANGED
@@ -1,24 +1,36 @@
1
  import os
2
  import json
3
  import pytest
4
- from src.monitoring import monitor_model
 
 
5
 
6
- METRICS_PATH = "reports/metrics.json"
7
 
8
@pytest.fixture(autouse=True)
def cleanup_metrics():
    """Remove the metrics file before the test and again afterwards."""
    # Pre-test cleanup: start from a state without a stale metrics file.
    if os.path.exists(METRICS_PATH):
        os.remove(METRICS_PATH)
    yield
    # Post-test cleanup: do not leak the generated file into later tests.
    if os.path.exists(METRICS_PATH):
        os.remove(METRICS_PATH)
16
 
 
17
def test_monitoring_creates_metrics():
    """Verify that monitoring creates the metrics.json file."""
    monitor_model()
    assert os.path.exists(METRICS_PATH), "metrics.json non è stato generato"

    with open(METRICS_PATH, "r") as f:
        metrics = json.load(f)
    # Both top-level metrics are expected directly in the report dict.
    assert "accuracy" in metrics and "f1" in metrics, "Metriche principali mancanti"
 
 
 
 
 
 
 
 
 
1
  import os
2
  import json
3
  import pytest
4
+ from src.monitoring import main, REPORTS_DIR
5
+
6
# Location of the metrics file produced by the monitoring run
# (REPORTS_DIR is provided by src.monitoring).
METRICS_PATH = os.path.join(REPORTS_DIR, "metrics.json")
7
 
 
8
 
9
@pytest.fixture(autouse=True)
def cleanup_metrics():
    """Ensure metrics.json is absent both before and after every test."""

    def _purge():
        # Best-effort delete; a missing file is the desired clean state.
        if os.path.exists(METRICS_PATH):
            os.remove(METRICS_PATH)

    _purge()
    yield
    _purge()
17
 
18
+
19
def test_monitoring_creates_metrics():
    """Run the monitoring entry point and verify that metrics.json is
    written with TweetEval and YouTube sections, each carrying the
    accuracy and f1 metrics."""
    main()

    assert os.path.exists(METRICS_PATH), "metrics.json non è stato generato"

    with open(METRICS_PATH, "r") as f:
        metrics = json.load(f)

    # Both dataset sections must be present in the report.
    for dataset in ("TweetEval", "YouTube"):
        assert dataset in metrics, f"Mancano metriche {dataset}"

    # Every section must expose the two core metrics.
    for name, payload in metrics.items():
        assert "accuracy" in payload, f"Manca accuracy per {name}"
        assert "f1" in payload, f"Manca F1 per {name}"