jarric committed on
Commit
e81e6d0
·
verified ·
1 Parent(s): f0bd2ca

Initial upload

Browse files
Files changed (5) hide show
  1. main.py +153 -0
  2. model_99_finetuned.pt +3 -0
  3. pretransform_files.py +37 -0
  4. read_cam.py +83 -0
  5. requirements.txt +189 -0
main.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ import os
3
+
4
+ from torch.nn import Linear
5
+ from torchvision.transforms import v2
6
+
7
+ import data.dataset
8
+ from torch.optim.lr_scheduler import CosineAnnealingLR
9
+ import pandas
10
+ from torchmetrics.classification import MulticlassAccuracy, MulticlassAveragePrecision, MulticlassF1Score
11
+ from torchvision.models import mobilenet_v3_small, MobileNet_V3_Small_Weights
12
+ from torch import nn, optim
13
+
14
+ import torch
15
+ from tqdm import tqdm
16
+
17
+ from torch.utils.data import random_split
18
+
19
+ import mlflow
20
+
21
if __name__ == '__main__':
    """Fine-tune a MobileNetV3-Small classifier head on the orange-disease
    dataset, logging metrics and per-epoch checkpoints to a local MLflow server.
    """
    mlflow.set_tracking_uri('http://localhost:5000')
    curr_date = datetime.datetime.now()
    # BUG FIX: os.mkdir fails when "outputs/" itself does not exist yet;
    # makedirs with exist_ok also survives re-runs.
    os.makedirs(f"outputs/{curr_date}", exist_ok=True)
    with mlflow.start_run():
        device = torch.device("cuda")
        # GPU-side augmentations, applied per batch during training only.
        augmentation_transforms = v2.Compose([
            v2.RandomHorizontalFlip(),
            v2.RandomVerticalFlip(),
            v2.RandomGrayscale(),
            v2.RandomAutocontrast(),
            v2.RandomRotation(45),
        ]).to(device)
        dataset = data.dataset.OrangeDataset("/home/jarric/orange_dataset/processed/FIELD IMAGES/")

        # 75/25 train/validation split.
        train_size = int(0.75 * len(dataset))
        val_size = len(dataset) - train_size
        train_dataset, val_dataset = random_split(dataset, [train_size, val_size])

        batch_size = 32

        # BUG FIX: the training loader must reshuffle between epochs; the
        # original used shuffle=False, so every epoch saw identical batches.
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=8,
        )

        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=8,
        )
        epochs = 100

        # `.DEFAULT` accessed through the IMAGENET1K_V1 member was a redundant
        # enum member-via-member lookup; name the weights directly.
        mobilenet_v3_model = mobilenet_v3_small(weights=MobileNet_V3_Small_Weights.IMAGENET1K_V1).to(device)

        for param in mobilenet_v3_model.parameters():  # freeze the backbone
            param.requires_grad = False

        # Replace the final classifier layer with a fresh 3-class head; its new
        # parameters default to requires_grad=True, so only the head is trained.
        # (Setting requires_grad on the Module itself, as the original did, is a no-op.)
        mobilenet_v3_model.classifier[3] = Linear(in_features=1024, out_features=3, bias=True)

        loss_fn = nn.CrossEntropyLoss().to(device)

        optimizer = optim.AdamW(
            mobilenet_v3_model.parameters(),
            lr=1e-4,
            weight_decay=1e-4)

        # Metrics for the 3 classes (citrus canker / healthy / melanose).
        acc_metric = MulticlassAccuracy(num_classes=3).to(device)
        ap_metric = MulticlassAveragePrecision(num_classes=3, average="macro").to(device)
        f1_metric = MulticlassF1Score(num_classes=3).to(device)

        for epoch in tqdm(range(epochs)):
            train_loss = 0.0
            avg_accuracy = 0.0
            average_precision = 0.0
            f1_score_avg = 0.0
            mobilenet_v3_model.train()
            for images, labels in tqdm(train_loader, leave=False):
                images, labels = images.to(device), labels.to(device)

                images = augmentation_transforms(images)
                outputs = mobilenet_v3_model(images)
                loss = loss_fn(outputs, labels)
                train_loss += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                _, predicted = torch.max(outputs.data, 1)
                # BUG FIX: .item() so the running sums are plain floats —
                # mlflow.log_metric expects numbers, not GPU tensors.
                avg_accuracy += acc_metric(predicted, labels).item()
                average_precision += ap_metric(outputs, labels).item()
                f1_score_avg += f1_metric(predicted, labels).item()

            train_loss /= len(train_loader)
            avg_accuracy /= len(train_loader)
            average_precision /= len(train_loader)
            f1_score_avg /= len(train_loader)
            mlflow.log_metric("train_loss", train_loss, step=epoch)
            mlflow.log_metric("train_avg_accuracy", avg_accuracy, step=epoch)
            mlflow.log_metric("train_average_precision", average_precision, step=epoch)
            mlflow.log_metric("f1_score_avg", f1_score_avg, step=epoch)

            val_loss = 0.0
            val_accuracy = 0.0
            average_precision_val = 0.0
            f1_score_avg_val = 0.0
            mobilenet_v3_model.eval()
            with torch.no_grad():
                for images, labels in tqdm(val_loader, leave=False):
                    images, labels = images.to(device), labels.to(device)

                    # Forward pass only — no augmentation on validation data.
                    outputs = mobilenet_v3_model(images)
                    loss = loss_fn(outputs, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(outputs.data, 1)
                    average_precision_val += ap_metric(outputs, labels).item()
                    val_accuracy += acc_metric(predicted, labels).item()
                    f1_score_avg_val += f1_metric(predicted, labels).item()

            val_loss /= len(val_loader)
            val_accuracy /= len(val_loader)
            average_precision_val /= len(val_loader)
            f1_score_avg_val /= len(val_loader)
            mlflow.log_metric("val_loss", val_loss, step=epoch)
            mlflow.log_metric("val_avg_accuracy", val_accuracy, step=epoch)
            mlflow.log_metric("val_average_precision", average_precision_val, step=epoch)
            mlflow.log_metric("val_f1_score_avg", f1_score_avg_val, step=epoch)

            # Save a full-model checkpoint every epoch and attach it to the run.
            torch.save(mobilenet_v3_model, f"outputs/{curr_date}/model_{epoch}_finetuned.pt")
            mlflow.log_artifact(f"outputs/{curr_date}/model_{epoch}_finetuned.pt")
model_99_finetuned.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e67254771b55408e9bb3310ff598ae747238fd218c3274d54875d07914c7426
3
+ size 6255666
pretransform_files.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pathlib
3
+ from copy import copy
4
+ from typing import List
5
+
6
+ import torch
7
+ from torchvision.io import read_image
8
+ from torchvision.models import MobileNet_V3_Small_Weights
9
+ from tqdm import tqdm
10
+
11
if __name__ == "__main__":
    """Pre-apply the MobileNetV3 inference transforms to every dataset image
    and save the resulting tensors, mirroring the class-dir/file layout under
    the output directory.
    """
    target_types = ["*jpg", "*jpeg", "*png"]
    target_dataset_path = pathlib.Path("/home/jarric/orange_dataset/archive/FIELD IMAGES/")
    output_path = "/home/jarric/orange_dataset/processed/FIELD IMAGES/"
    file_paths: List[pathlib.Path] = []

    # Use the exact preprocessing pipeline the pretrained weights ship with,
    # executed on the GPU.
    mobilenet_v3_transforms = MobileNet_V3_Small_Weights.IMAGENET1K_V1.transforms()
    mobilenet_v3_transforms = mobilenet_v3_transforms.to("cuda")

    for target_type in target_types:
        file_paths.extend(target_dataset_path.rglob(target_type))

    for file_path in tqdm(file_paths, desc="Preprocessing files"):
        image_tensor = read_image(str(file_path)).to("cuda")
        processed_tensor = mobilenet_v3_transforms(image_tensor)

        # Mirror the last two path components (class dir / file name) under
        # output_path. (Dead code building an unused "*_augmented" filename
        # was removed.)
        filename_parts = file_path.parts[-2:]
        output_file_path = pathlib.Path(output_path).joinpath(*filename_parts)

        # BUG FIX: os.mkdir raised when intermediate directories were missing
        # and on concurrent/re-runs; parents+exist_ok is safe in both cases.
        output_file_path.parent.mkdir(parents=True, exist_ok=True)

        # Tensors are moved back to CPU so the saved files load anywhere.
        torch.save(processed_tensor.cpu(), output_file_path)
read_cam.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+ import cv2
4
+ from sympy.stats.rv import probability
5
+ from torch.nn import Sequential, Conv2d, BatchNorm2d, Hardswish, ReLU, AdaptiveAvgPool2d, Hardsigmoid, Linear, Dropout
6
+ from torchvision.io import read_image
7
+ from torchvision.models import MobileNet_V3_Small_Weights, MobileNetV3
8
+ from torchvision.models.mobilenetv3 import InvertedResidual
9
+ from torchvision.ops import Conv2dNormActivation, SqueezeExcitation
10
+
11
if __name__ == "__main__":
    """Live webcam inference: classify frames with the fine-tuned MobileNetV3,
    average confidences over a 50-frame window, and overlay the winning class
    on the preview. Press 'q' to quit.
    """
    # torch>=2.6 defaults torch.load to weights_only=True; whitelist every
    # class contained in the pickled full-model checkpoint so it deserializes.
    # (add_safe_globals accepts a list, so one call replaces thirteen.)
    torch.serialization.add_safe_globals([
        MobileNetV3,
        Sequential,
        Conv2dNormActivation,
        Conv2d,
        BatchNorm2d,
        Hardswish,
        InvertedResidual,
        ReLU,
        SqueezeExcitation,
        AdaptiveAvgPool2d,
        Hardsigmoid,
        Linear,
        Dropout,
    ])

    mobilenetv3 = torch.load("/home/jarric/PycharmProjects/OrangeRecognizer/outputs/2025-04-21 12:08:02.020215/model_99_finetuned.pt")
    mobilenetv3 = mobilenetv3.to("cuda")
    mobilenet_v3_transforms = MobileNet_V3_Small_Weights.IMAGENET1K_V1.transforms().to("cuda")
    class_assigment = {'citrus canker': 0, 'healthy': 1, 'melanose': 2}
    class_numbering = {v: k for k, v in class_assigment.items()}  # index -> label

    font = cv2.FONT_HERSHEY_SIMPLEX

    vid_cam = cv2.VideoCapture(0)
    confidence_list = []
    if not vid_cam.isOpened():
        print("Cannot open camera")
        exit(-1)

    warmed_up = False
    final_confidence = 0
    class_name = ""
    averaged_final_confidences = []
    while True:
        ret, frame = vid_cam.read()
        if not ret:
            # BUG FIX: release the capture device before bailing out.
            vid_cam.release()
            exit(-2)

        orig_frame = frame.copy()
        # The model expects RGB; OpenCV captures BGR.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # NOTE(review): 244 looks like a typo for 224; display-only — confirm.
        frame_resized_for_lookup = cv2.resize(orig_frame, (244 * 2, 224 * 2))
        # HWC uint8 ndarray -> CHW uint8 tensor on the GPU, as the weights'
        # transforms expect.
        tensor_frame = torch.Tensor(frame).permute((2, 0, 1)).to(torch.uint8).to("cuda")
        processed_frame = mobilenet_v3_transforms(tensor_frame.unsqueeze(0))
        logits = mobilenetv3(processed_frame)
        confidences = logits.softmax(dim=-1)
        confidence_list.append(confidences.detach().cpu().squeeze(0))

        # Average per-class confidences over a 50-frame window to stabilise
        # the displayed label.
        if len(confidence_list) >= 50:
            warmed_up = True

            averaged_final_confidences = []
            for class_num in class_numbering.keys():
                averaged_final_confidences.append(np.array(confidence_list)[:, class_num].mean())

            final_confidence, selected_class = torch.max(torch.Tensor(averaged_final_confidences).to("cuda").unsqueeze(0), 1)
            class_name = class_numbering[selected_class.detach().cpu().item()]
            averages_for_reference = [value.item() for value in averaged_final_confidences]
            print(averages_for_reference)

            confidence_list.clear()

        if warmed_up:
            cv2.putText(frame_resized_for_lookup, f"{class_name}, [{round(final_confidence.detach().cpu().item(), 2)}]", (50, 50), font, 1, (0, 0, 255), 3)
        cv2.imshow("frame", frame_resized_for_lookup)

        if cv2.waitKey(1) == ord('q'):
            break

    # BUG FIX: the camera was never released on normal exit (the release call
    # was commented out), keeping the device busy after the program quit.
    vid_cam.release()
    cv2.destroyAllWindows()
    exit(0)
requirements.txt ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ alembic==1.15.2
2
+ annotated-types==0.7.0
3
+ anyio @ file:///croot/anyio_1729121277521/work
4
+ argon2-cffi @ file:///opt/conda/conda-bld/argon2-cffi_1645000214183/work
5
+ argon2-cffi-bindings @ file:///croot/argon2-cffi-bindings_1736182440035/work
6
+ asttokens @ file:///croot/asttokens_1743630435401/work
7
+ async-lru @ file:///work/perseverance-python-buildout/croot/async-lru_1701732681408/work
8
+ attrs @ file:///croot/attrs_1734533101012/work
9
+ babel @ file:///croot/babel_1737454360933/work
10
+ beautifulsoup4 @ file:///croot/beautifulsoup4-split_1718029820055/work
11
+ bleach @ file:///croot/bleach_1732290411627/work
12
+ blinker==1.9.0
13
+ Bottleneck @ file:///croot/bottleneck_1731058641041/work
14
+ Brotli @ file:///croot/brotli-split_1736182456865/work
15
+ cachetools==5.5.2
16
+ certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi
17
+ cffi @ file:///croot/cffi_1736182485317/work
18
+ charset-normalizer @ file:///croot/charset-normalizer_1721748349566/work
19
+ click==8.1.8
20
+ cloudpickle==3.1.1
21
+ comm @ file:///croot/comm_1709322850197/work
22
+ contourpy @ file:///croot/contourpy_1732540045555/work
23
+ cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
24
+ databricks-sdk==0.50.0
25
+ debugpy @ file:///croot/debugpy_1736267418885/work
26
+ decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
27
+ defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work
28
+ Deprecated @ file:///home/conda/feedstock_root/build_artifacts/deprecated_1737986966356/work
29
+ docker==7.1.0
30
+ executing @ file:///opt/conda/conda-bld/executing_1646925071911/work
31
+ fastapi==0.115.12
32
+ fastjsonschema @ file:///croot/python-fastjsonschema_1731939362158/work
33
+ filelock==3.13.1
34
+ Flask==3.1.0
35
+ fonttools @ file:///croot/fonttools_1737039080035/work
36
+ fsspec==2024.6.1
37
+ gitdb==4.0.12
38
+ GitPython==3.1.44
39
+ google-auth==2.39.0
40
+ graphene==3.4.3
41
+ graphql-core==3.2.6
42
+ graphql-relay==3.2.0
43
+ greenlet==3.2.0
44
+ gunicorn==23.0.0
45
+ h11 @ file:///croot/h11_1706652277403/work
46
+ httpcore @ file:///croot/httpcore_1706728464539/work
47
+ httpx @ file:///croot/httpx_1723474802858/work
48
+ huggingface-hub==0.30.2
49
+ idna @ file:///croot/idna_1714398848350/work
50
+ importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work
51
+ inquirerpy==0.3.4
52
+ ipykernel @ file:///croot/ipykernel_1737660677549/work
53
+ ipython @ file:///croot/ipython_1734548052611/work
54
+ ipywidgets==8.1.6
55
+ itsdangerous==2.2.0
56
+ jedi @ file:///croot/jedi_1733987392413/work
57
+ Jinja2==3.1.4
58
+ joblib==1.4.2
59
+ json5 @ file:///croot/json5_1730786798687/work
60
+ jsonschema @ file:///croot/jsonschema_1728486696720/work
61
+ jsonschema-specifications @ file:///work/perseverance-python-buildout/croot/jsonschema-specifications_1701731664072/work
62
+ jupyter-events @ file:///croot/jupyter_events_1741184577592/work
63
+ jupyter-lsp @ file:///work/perseverance-python-buildout/croot/jupyter-lsp-meta_1707343107456/work
64
+ jupyter_client @ file:///croot/jupyter_client_1737570961872/work
65
+ jupyter_core @ file:///croot/jupyter_core_1718818295206/work
66
+ jupyter_server @ file:///croot/jupyter_server_1741200034823/work
67
+ jupyter_server_terminals @ file:///croot/jupyter_server_terminals_1744706698694/work
68
+ jupyterlab @ file:///croot/jupyterlab_1737555423487/work
69
+ jupyterlab_pygments @ file:///croot/jupyterlab_pygments_1741124142640/work
70
+ jupyterlab_server @ file:///croot/jupyterlab_server_1725865349919/work
71
+ jupyterlab_widgets==3.0.14
72
+ kiwisolver @ file:///croot/kiwisolver_1737039087198/work
73
+ lightning @ file:///home/conda/feedstock_root/build_artifacts/lightning_1742437252389/work
74
+ lightning-utilities @ file:///home/conda/feedstock_root/build_artifacts/lightning-utilities_1743734367405/work
75
+ lit @ file:///home/conda/feedstock_root/build_artifacts/llvm-package_1744799902975/work/llvm/utils/lit
76
+ Mako==1.3.10
77
+ Markdown==3.8
78
+ MarkupSafe==2.1.5
79
+ matplotlib==3.10.0
80
+ matplotlib-inline @ file:///work/perseverance-python-buildout/croot/matplotlib-inline_1698864771271/work
81
+ mistune @ file:///croot/mistune_1741124011532/work
82
+ mkl-service==2.4.0
83
+ mkl_fft @ file:///io/mkl313/mkl_fft_1730824109137/work
84
+ mkl_random @ file:///io/mkl313/mkl_random_1730823916628/work
85
+ mlflow==2.21.3
86
+ mlflow-skinny==2.21.3
87
+ mpmath @ file:///home/conda/feedstock_root/build_artifacts/mpmath_1733302684489/work
88
+ nbclient @ file:///croot/nbclient_1741123995822/work
89
+ nbconvert @ file:///croot/nbconvert-meta_1741184653315/work
90
+ nbformat @ file:///croot/nbformat_1728049424075/work
91
+ nest-asyncio @ file:///croot/nest-asyncio_1708532673751/work
92
+ networkx==3.3
93
+ notebook @ file:///croot/notebook_1738159946465/work
94
+ notebook_shim @ file:///croot/notebook-shim_1741707758683/work
95
+ numexpr @ file:///croot/numexpr_1730215937391/work
96
+ numpy==2.1.2
97
+ nvidia-cublas-cu12==12.4.5.8
98
+ nvidia-cuda-cupti-cu12==12.4.127
99
+ nvidia-cuda-nvrtc-cu12==12.4.127
100
+ nvidia-cuda-runtime-cu12==12.4.127
101
+ nvidia-cudnn-cu12==9.1.0.70
102
+ nvidia-cufft-cu12==11.2.1.3
103
+ nvidia-curand-cu12==10.3.5.147
104
+ nvidia-cusolver-cu12==11.6.1.9
105
+ nvidia-cusparse-cu12==12.3.1.170
106
+ nvidia-cusparselt-cu12==0.6.2
107
+ nvidia-nccl-cu12==2.21.5
108
+ nvidia-nvjitlink-cu12==12.4.127
109
+ nvidia-nvtx-cu12==12.4.127
110
+ opencv-python==4.11.0.86
111
+ opentelemetry-api @ file:///home/conda/feedstock_root/build_artifacts/opentelemetry-api_1744805275188/work
112
+ opentelemetry-sdk==1.32.1
113
+ opentelemetry-semantic-conventions==0.53b1
114
+ overrides @ file:///work/perseverance-python-buildout/croot/overrides_1701732220415/work
115
+ packaging @ file:///croot/packaging_1734472117206/work
116
+ pandas @ file:///croot/pandas_1732735089971/work/dist/pandas-2.2.3-cp312-cp312-linux_x86_64.whl#sha256=57b66702d418720ec8483f7c4ec7c08d41815316ad7ce09d5b7bbc34eefcfdfd
117
+ pandocfilters @ file:///opt/conda/conda-bld/pandocfilters_1643405455980/work
118
+ parso @ file:///croot/parso_1733963305961/work
119
+ pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
120
+ pfzy==0.3.4
121
+ pillow==11.0.0
122
+ Pillow-SIMD==9.5.0.post2
123
+ platformdirs @ file:///croot/platformdirs_1744273042065/work
124
+ prometheus_client @ file:///croot/prometheus_client_1744271615306/work
125
+ prompt-toolkit @ file:///croot/prompt-toolkit_1704404351921/work
126
+ protobuf==5.29.4
127
+ psutil @ file:///croot/psutil_1736367091698/work
128
+ ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
129
+ pure-eval @ file:///opt/conda/conda-bld/pure_eval_1646925070566/work
130
+ pyarrow==19.0.1
131
+ pyasn1==0.6.1
132
+ pyasn1_modules==0.4.2
133
+ pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
134
+ pydantic==2.11.3
135
+ pydantic_core==2.33.1
136
+ Pygments @ file:///croot/pygments_1744664109463/work
137
+ pyparsing @ file:///croot/pyparsing_1731445506121/work
138
+ PySocks @ file:///work/perseverance-python-buildout/croot/pysocks_1698845478203/work
139
+ python-dateutil @ file:///croot/python-dateutil_1716495738603/work
140
+ python-json-logger @ file:///croot/python-json-logger_1734370021104/work
141
+ pytorch-lightning @ file:///home/conda/feedstock_root/build_artifacts/pytorch-lightning_1742579345300/work
142
+ pytz @ file:///croot/pytz_1713974312559/work
143
+ PyYAML @ file:///croot/pyyaml_1728657952215/work
144
+ pyzmq @ file:///croot/pyzmq_1734687138743/work
145
+ referencing @ file:///work/perseverance-python-buildout/croot/referencing_1701731622327/work
146
+ requests @ file:///croot/requests_1730999120400/work
147
+ rfc3339-validator @ file:///work/perseverance-python-buildout/croot/rfc3339-validator_1698873973408/work
148
+ rfc3986-validator @ file:///work/perseverance-python-buildout/croot/rfc3986-validator_1698874010100/work
149
+ rpds-py @ file:///croot/rpds-py_1736541261634/work
150
+ rsa==4.9.1
151
+ scikit-learn==1.6.1
152
+ scipy==1.15.2
153
+ seaborn @ file:///croot/seaborn_1741185878286/work
154
+ Send2Trash @ file:///croot/send2trash_1736540790175/work
155
+ setuptools==75.8.0
156
+ six @ file:///croot/six_1744271502820/work
157
+ smmap==5.0.2
158
+ sniffio @ file:///croot/sniffio_1705431295498/work
159
+ soupsieve @ file:///work/perseverance-python-buildout/croot/soupsieve_1698866207280/work
160
+ SQLAlchemy==2.0.40
161
+ sqlparse==0.5.3
162
+ stack-data @ file:///opt/conda/conda-bld/stack_data_1646927590127/work
163
+ starlette==0.46.2
164
+ sympy==1.13.1
165
+ terminado @ file:///work/perseverance-python-buildout/croot/terminado_1698874934826/work
166
+ threadpoolctl==3.6.0
167
+ tinycss2 @ file:///croot/tinycss2_1738337643607/work
168
+ torch==2.6.0
169
+ torchaudio==2.6.0
170
+ torchmetrics @ file:///home/conda/feedstock_root/build_artifacts/torchmetrics_1744107217838/work
171
+ torchvision==0.21.0
172
+ tornado @ file:///croot/tornado_1733960490606/work
173
+ tqdm @ file:///croot/tqdm_1738943501192/work
174
+ traitlets @ file:///croot/traitlets_1718227057033/work
175
+ triton==3.2.0
176
+ typing-inspection==0.4.0
177
+ typing_extensions @ file:///croot/typing_extensions_1734714854207/work
178
+ tzdata @ file:///croot/python-tzdata_1690578112552/work
179
+ unicodedata2 @ file:///croot/unicodedata2_1736541023050/work
180
+ urllib3 @ file:///croot/urllib3_1737133630106/work
181
+ uvicorn==0.34.2
182
+ wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
183
+ webencodings @ file:///work/perseverance-python-buildout/croot/webencodings_1698866454420/work
184
+ websocket-client @ file:///croot/websocket-client_1715878298792/work
185
+ Werkzeug==3.1.3
186
+ wheel==0.45.1
187
+ widgetsnbextension==4.0.14
188
+ wrapt @ file:///croot/wrapt_1736540904746/work
189
+ zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work