Yusuf committed on
Commit
84cfdfc
·
1 Parent(s): 1ea541c

fix dataloader worker number

Browse files
best_model.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:23a4c08eaad4b40290eca84e6a8fa3e1d69bdf4312d5db6db5de96d1d8753024
3
- size 130261986
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3c19d6a5fea8043e6fda261763b7909aaed487b83991f29ca395b2ce7c8e591
3
+ size 20532322
dataPrep/helpers/clearml_data.py CHANGED
@@ -11,7 +11,7 @@ Takes latest Data Prep ClearML task from project and reconstruct:
11
  - data loaders for both full and subset datasets
12
  - Aug settings used
13
  '''
14
- def extract_latest_data_task(project_name: str = "Small Group Project", num_workers: int = 8):
15
 
16
  # --------- Get latest Data Preparation task from ClearML ---------
17
 
 
11
  - data loaders for both full and subset datasets
12
  - Aug settings used
13
  '''
14
+ def extract_latest_data_task(project_name: str = "Small Group Project", num_workers: int = 0):
15
 
16
  # --------- Get latest Data Preparation task from ClearML ---------
17
 
testingModel/run_testing.py CHANGED
@@ -1,76 +1,77 @@
1
- from clearml import Task
2
- from dataPrep.helpers.clearml_data import extract_latest_data_task
3
-
4
- import torch
5
- from models.modelOne import modelOne
6
- from testingModel.helpers.evaluation import make_predictions
7
-
8
-
9
- # -------------- Load Data --------------
10
- project_name = "Small Group Project"
11
- subset_loaders, full_loaders, data_prep_metadata = extract_latest_data_task(project_name=project_name)
12
-
13
-
14
- # -------- ClearML Testing Task Setup --------
15
- testing_task = Task.init(
16
- project_name=f"{project_name}/Model Testing",
17
- task_name="Model Testing",
18
- task_type=Task.TaskTypes.testing,
19
- reuse_last_task_id=False,
20
- )
21
-
22
- # Reference the data prep task used
23
- testing_logger = testing_task.get_logger()
24
- testing_task.connect(data_prep_metadata, name="data_prep_metadata_READONLY")
25
-
26
- CLEARML_TRAINING_ID = "5bac154a885b4acbaa07d8588027bb27"
27
-
28
- # Testing parameters - Modify these when experimenting
29
- testing_config = {
30
- "model_train_id": CLEARML_TRAINING_ID,
31
- "num_classes": 39,
32
- "model_path": "best_model.pt",
33
- }
34
- testing_task.connect(testing_config)
35
-
36
- # Load the model weights from ClearML training task
37
- training_task = Task.get_task(task_id=testing_config["model_train_id"])
38
- model_artifact = training_task.artifacts.get("best_model")
39
- model_path = model_artifact.get_local_copy()
40
-
41
- # Reference training metadata
42
- training_hyperparams = training_task.get_parameters_as_dict()
43
- testing_task.connect(training_hyperparams['General'], name="training_metadata_READONLY")
44
-
45
-
46
- # -------- Rebuild the ML model --------
47
- model = modelOne()
48
- state_dict = torch.load(model_path, map_location="cpu") # Load to CPU first
49
- model.load_state_dict(state_dict)
50
- model.eval() # set dropout & batch norm layers to eval mode
51
-
52
- # Move model to GPU if available
53
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
54
- model.to(device)
55
-
56
-
57
- # -------------------- Test model on test set --------------------
58
- testing_logger.report_text("Starting evaluation on TEST SUBSET...\n")
59
- test_subset = subset_loaders['test']
60
-
61
- subset_results = make_predictions(model, test_subset, device)
62
-
63
-
64
- # Accuracy & Loss logging
65
- testing_logger.report_single_value(name="Test Subset Accuracy", value=subset_results["accuracy"])
66
- testing_logger.report_single_value(name="Test Subset Loss", value=subset_results["loss"])
67
-
68
-
69
- # --------- Complete -----------------
70
- print("\n------ Testing Complete ------")
71
- testing_logger.report_text(
72
- f"TEST SUBSET RESULTS:\n"
73
- f"Loss: {subset_results['loss']:.4f}\n"
74
- f"Accuracy: {subset_results['accuracy']:.4f}\n"
75
- )
76
- testing_task.close()
 
 
1
+ from clearml import Task
2
+ from dataPrep.helpers.clearml_data import extract_latest_data_task
3
+
4
+ import torch
5
+ from models.modelOne import modelOne
6
+ from models.modelTwo import BetterCNN
7
+ from testingModel.helpers.evaluation import make_predictions
8
+
9
+
10
+ # -------------- Load Data --------------
11
+ project_name = "Small Group Project"
12
+ subset_loaders, full_loaders, data_prep_metadata = extract_latest_data_task(project_name=project_name)
13
+
14
+
15
+ # -------- ClearML Testing Task Setup --------
16
+ testing_task = Task.init(
17
+ project_name=f"{project_name}/Model Testing",
18
+ task_name="Model Testing",
19
+ task_type=Task.TaskTypes.testing,
20
+ reuse_last_task_id=False,
21
+ )
22
+
23
+ # Reference the data prep task used
24
+ testing_logger = testing_task.get_logger()
25
+ testing_task.connect(data_prep_metadata, name="data_prep_metadata_READONLY")
26
+
27
+ CLEARML_TRAINING_ID = "dca82d7c2f404c249f2e5325aaf77207"
28
+
29
+ # Testing parameters - Modify these when experimenting
30
+ testing_config = {
31
+ "model_train_id": CLEARML_TRAINING_ID,
32
+ "num_classes": 39,
33
+ "model_path": "best_model.pt",
34
+ }
35
+ testing_task.connect(testing_config)
36
+
37
+ # Load the model weights from ClearML training task
38
+ training_task = Task.get_task(task_id=testing_config["model_train_id"])
39
+ model_artifact = training_task.artifacts.get("best_model")
40
+ model_path = model_artifact.get_local_copy()
41
+
42
+ # Reference training metadata
43
+ training_hyperparams = training_task.get_parameters_as_dict()
44
+ testing_task.connect(training_hyperparams['General'], name="training_metadata_READONLY")
45
+
46
+
47
+ # -------- Rebuild the ML model --------
48
+ model = BetterCNN(noOfClasses=testing_config["num_classes"])
49
+ state_dict = torch.load(model_path, map_location="cpu") # Load to CPU first
50
+ model.load_state_dict(state_dict)
51
+ model.eval() # set dropout & batch norm layers to eval mode
52
+
53
+ # Move model to GPU if available
54
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
55
+ model.to(device)
56
+
57
+
58
+ # -------------------- Test model on test set --------------------
59
+ testing_logger.report_text("Starting evaluation on TEST SUBSET...\n")
60
+ test_subset = subset_loaders['test']
61
+
62
+ subset_results = make_predictions(model, test_subset, device)
63
+
64
+
65
+ # Accuracy & Loss logging
66
+ testing_logger.report_single_value(name="Test Subset Accuracy", value=subset_results["accuracy"])
67
+ testing_logger.report_single_value(name="Test Subset Loss", value=subset_results["loss"])
68
+
69
+
70
+ # --------- Complete -----------------
71
+ print("\n------ Testing Complete ------")
72
+ testing_logger.report_text(
73
+ f"TEST SUBSET RESULTS:\n"
74
+ f"Loss: {subset_results['loss']:.4f}\n"
75
+ f"Accuracy: {subset_results['accuracy']:.4f}\n"
76
+ )
77
+ testing_task.close()
trainingModel/run_training.py CHANGED
@@ -1,8 +1,8 @@
1
- import os
2
  from clearml import Task
3
  from dataPrep.helpers.clearml_data import extract_latest_data_task
4
 
5
  import torch
 
6
  from models.modelTwo import BetterCNN
7
  from trainingModel.helpers.Training import train_model
8
 
 
 
1
  from clearml import Task
2
  from dataPrep.helpers.clearml_data import extract_latest_data_task
3
 
4
  import torch
5
+ from models.modelOne import modelOne
6
  from models.modelTwo import BetterCNN
7
  from trainingModel.helpers.Training import train_model
8