YashNagraj75 committed on
Commit
97faf00
·
1 Parent(s): 2bd73d5

Add cv2 to the requirements

Browse files
Files changed (4) hide show
  1. pyproject.toml +1 -0
  2. requirements.txt +1 -0
  3. training_scripts/train_ddpm.py +0 -305
  4. uv.lock +29 -8
pyproject.toml CHANGED
@@ -6,6 +6,7 @@ readme = "README.md"
6
  requires-python = ">=3.13"
7
  dependencies = [
8
  "numpy>=2.2.4",
 
9
  "pandas>=2.2.3",
10
  "torch>=2.6.0",
11
  "torchvision>=0.21.0",
 
6
  requires-python = ">=3.13"
7
  dependencies = [
8
  "numpy>=2.2.4",
9
+ "opencv-python>=4.11.0.86",
10
  "pandas>=2.2.3",
11
  "torch>=2.6.0",
12
  "torchvision>=0.21.0",
requirements.txt CHANGED
@@ -26,6 +26,7 @@ nvidia-cusparselt-cu12==0.6.2
26
  nvidia-nccl-cu12==2.21.5
27
  nvidia-nvjitlink-cu12==12.4.127
28
  nvidia-nvtx-cu12==12.4.127
 
29
  pandas==2.2.3
30
  pillow==11.1.0
31
  platformdirs==4.3.7
 
26
  nvidia-nccl-cu12==2.21.5
27
  nvidia-nvjitlink-cu12==12.4.127
28
  nvidia-nvtx-cu12==12.4.127
29
+ opencv-python==4.11.0.86
30
  pandas==2.2.3
31
  pillow==11.1.0
32
  platformdirs==4.3.7
training_scripts/train_ddpm.py DELETED
@@ -1,305 +0,0 @@
1
import logging
import os
import time

import numpy as np
import torch
import wandb
import yaml
from torch.utils.data import DataLoader
from tqdm import tqdm

from data import mnist_dataset
from data.mnist_dataset import MnistDataset
from model_blocks.unet_base import UNet
from scheduler.linear_scheduler import LinearNoiseScheduler
15
-
16
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
17
- logger = logging.getLogger(__name__)
18
- wandb.login()
19
-
20
-
21
- def init_wandb(config):
22
- """
23
- Initialize a new wandb run
24
- """
25
- run = wandb.init(
26
- project="controlnet-ddpm-mnist",
27
- config=config,
28
- resume="allow", # Allows resuming if run was interrupted
29
- )
30
- return run
31
-
32
-
33
- def load_checkpoint(model, optimizer, scheduler, checkpoint_path):
34
- """
35
- Load model checkpoint from local file
36
- """
37
- checkpoint = torch.load(checkpoint_path, map_location=device)
38
-
39
- model.load_state_dict(checkpoint["model_state_dict"])
40
- optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
41
- if (
42
- scheduler
43
- and "scheduler_state_dict" in checkpoint
44
- and checkpoint["scheduler_state_dict"]
45
- ):
46
- scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
47
-
48
- start_epoch = checkpoint["epoch"] + 1 # Start from the next epoch
49
- step = checkpoint["step"]
50
-
51
- print(f"Loaded checkpoint from epoch {checkpoint['epoch']}")
52
- return start_epoch, step
53
-
54
-
55
- def save_checkpoint(
56
- model, optimizer, scheduler, epoch, loss, step, run, checkpoint_path
57
- ):
58
- """
59
- Save model checkpoint locally and to wandb
60
- """
61
- # Create checkpoint dictionary
62
- checkpoint = {
63
- "epoch": epoch,
64
- "model_state_dict": model.state_dict(),
65
- "optimizer_state_dict": optimizer.state_dict(),
66
- "scheduler_state_dict": scheduler.state_dict() if scheduler else None,
67
- "loss": loss,
68
- "step": step,
69
- }
70
-
71
- # Save locally
72
- torch.save(checkpoint, checkpoint_path)
73
-
74
- # Log to wandb
75
- artifact = wandb.Artifact(f"model-checkpoint-epoch-{epoch}", type="model")
76
- artifact.add_file(checkpoint_path)
77
- run.log_artifact(artifact)
78
-
79
- print(f"Checkpoint saved at epoch {epoch}")
80
- return checkpoint_path
81
-
82
-
83
- def train(args):
84
- with open(args.config_path, "r") as file:
85
- try:
86
- config = yaml.safe_load(file)
87
- except yaml.YAMLError as exc:
88
- print(exc)
89
- print(config)
90
- run = init_wandb(config["train_params"])
91
-
92
- diffusion_config = config["diffusion_params"]
93
- dataset_config = config["dataset_params"]
94
- model_config = config["model_params"]
95
- train_config = config["train_params"]
96
- scheduler = LinearNoiseScheduler(
97
- num_timesteps=diffusion_config["num_timesteps"],
98
- beta_start=diffusion_config["beta_start"],
99
- beta_end=diffusion_config["beta_end"],
100
- )
101
-
102
- mnist = MnistDataset("train", dataset_config["im_path"])
103
- mnist_loader = DataLoader(
104
- mnist, batch_size=train_config["batch_size"], shuffle=True, num_workers=4
105
- )
106
-
107
- model = UNet(model_config).to(device)
108
- model.train()
109
- logger.debug(f"Initialized model and set to train")
110
- optimizer = torch.optim.AdamW(model.parameters(), lr=train_config["ddpm_lr"])
111
- criterion = torch.nn.MSELoss()
112
-
113
- # Create the output directories
114
- if not os.path.exists(
115
- os.path.join(train_config["task_name"], train_config["ddpm_ckpt_name"])
116
- ):
117
- os.mkdir(train_config["task_name"])
118
-
119
- ckpt_path = os.path.join(train_config["task_name"], train_config["ddpm_ckpt_name"])
120
- # Load checkpoint if there
121
- if os.path.exists(ckpt_path):
122
- start_epoch, step = load_checkpoint(
123
- model, optimizer=optimizer, scheduler=scheduler, checkpoint_path=ckpt_path
124
- )
125
- else:
126
- start_epoch = 0
127
- step = 0
128
-
129
- # Log model architecture as a Table
130
- model_table = wandb.Table(columns=["Layer", "Parameters"])
131
- total_params = 0
132
- for name, param in model.named_parameters():
133
- if param.requires_grad:
134
- params = param.numel()
135
- total_params += params
136
- model_table.add_data(name, params)
137
-
138
- wandb.log({"model_architecture": model_table})
139
- wandb.log({"total_parameters": total_params})
140
-
141
- # Watch model gradients and parameters
142
- wandb.watch(model, log="all", log_freq=100)
143
-
144
- for epoch in range(start_epoch, train_config["num_epochs"]):
145
- losses = []
146
- batch_idx = 0
147
- progress_bar = tqdm(
148
- mnist_loader, desc=f"Epoch {epoch + 1}/{train_config['num_epochs']}"
149
- )
150
-
151
- for im in progress_bar:
152
- batch_idx += 1
153
- optimizer.zero_grad()
154
- im = im.float().to(device)
155
-
156
- # Sample noise
157
- noise = torch.randn_like(im).to(device)
158
- logger.debug(f"Sampled noise epoch {epoch} : {noise.shape}")
159
-
160
- # Sample timestep
161
- t = torch.randint(0, diffusion_config["num_timesteps"], (im.shape[0],)).to(
162
- device
163
- )
164
-
165
- noisy_im = scheduler.add_noise(im, noise, t)
166
- noise_pred = model(noisy_im, t)
167
-
168
- loss = criterion(noise_pred, noise)
169
- losses.append(loss.item())
170
- loss.backward()
171
- optimizer.step()
172
-
173
- # Calculate gradient norm for monitoring
174
- total_norm = 0
175
- for p in model.parameters():
176
- if p.grad is not None:
177
- param_norm = p.grad.data.norm(2)
178
- total_norm += param_norm.item() ** 2
179
- total_norm = total_norm**0.5
180
-
181
- # Update progress bar
182
- progress_bar.set_postfix({"loss": loss.item(), "avg_loss": np.mean(losses)})
183
-
184
- wandb.log(
185
- {
186
- "train/batch_loss": loss.item(),
187
- "train/step": step,
188
- "train/epoch": epoch + batch_idx / len(mnist_loader),
189
- "train/gradient_norm": total_norm,
190
- "train/learning_rate": optimizer.param_groups[0]["lr"],
191
- }
192
- )
193
-
194
- step += 1
195
-
196
- avg_loss = np.mean(losses)
197
-
198
- # Log epoch-level metrics
199
- wandb.log(
200
- {
201
- "train/epoch_loss": avg_loss,
202
- "train/epoch_completed": epoch,
203
- }
204
- )
205
-
206
- print(f"Finished epoch: {epoch} | Loss: {np.mean(losses):.4f}")
207
- if epoch % train_config["save_epoch"]:
208
- visualize_samples(
209
- model, scheduler, epoch, diffusion_config["num_timesteps"], device
210
- )
211
-
212
- save_checkpoint(
213
- model, optimizer, scheduler, epoch, np.mean(losses), step, run, ckpt_path
214
- )
215
-
216
- # Log final model as artifact
217
- logging.info("Finished training and starting to save model")
218
- final_model_path = os.path.join(
219
- train_config["task_name"], f"final_{train_config['ddpm_ckpt_name']}"
220
- )
221
- save_checkpoint(
222
- model,
223
- optimizer,
224
- scheduler,
225
- train_config["num_epochs"] - 1,
226
- avg_loss,
227
- step,
228
- run,
229
- final_model_path,
230
- )
231
- logging.info("Saved Model to Wandb and local")
232
-
233
- # Log a summary table of training
234
- summary_table = wandb.Table(columns=["Metric", "Value"])
235
- summary_table.add_data("Final Loss", avg_loss)
236
- summary_table.add_data("Best Loss", best_loss)
237
- summary_table.add_data("Best Epoch", epoch)
238
- summary_table.add_data("Total Steps", step)
239
- summary_table.add_data("Training Time (hours)", wandb.run.duration / 3600)
240
-
241
- wandb.log({"training_summary": summary_table})
242
-
243
- # Finish the run
244
- wandb.finish()
245
-
246
-
247
- def visualize_samples(model, scheduler, epoch, num_timesteps, device, num_samples=4):
248
- """
249
- Generate sample images from noise and log to wandb
250
- """
251
- model.eval()
252
- with torch.no_grad():
253
- # Start with random noise
254
- samples = torch.randn(num_samples, 1, 28, 28).to(device)
255
-
256
- # Store the denoising process
257
- sample_images = []
258
-
259
- # Record more frequently at the beginning of sampling
260
- log_steps = set([0, 20, 50, 100, 200, 400, 600, 800, num_timesteps - 1])
261
-
262
- # Denoise gradually
263
- for i in tqdm(reversed(range(num_timesteps)), desc="Sampling"):
264
- t = torch.full((num_samples,), i, device=device, dtype=torch.long)
265
-
266
- # Get model prediction and update sample
267
- predicted_noise = model(samples, t)
268
- samples = scheduler.step(predicted_noise, i, samples)
269
-
270
- # Save images at specified timesteps
271
- if i in log_steps:
272
- # Denormalize and convert to numpy for logging
273
- denorm_samples = samples.clamp(-1, 1).cpu().numpy()
274
- denorm_samples = (
275
- denorm_samples + 1
276
- ) / 2.0 # scale from [-1, 1] to [0, 1]
277
- sample_images.append((i, denorm_samples))
278
-
279
- # Create a grid to show denoising process
280
- images_to_log = {}
281
-
282
- # Log individual samples
283
- for i, sample in enumerate(samples):
284
- sample_np = sample.clamp(-1, 1).cpu().numpy()
285
- sample_np = (sample_np + 1) / 2.0 # scale from [-1, 1] to [0, 1]
286
- images_to_log[f"sample_{i}_epoch_{epoch}"] = wandb.Image(
287
- sample_np[0], caption=f"Sample {i}, Epoch {epoch}"
288
- )
289
-
290
- # Log denoising process for first sample
291
- denoising_steps = []
292
- for step_idx, samples_np in sample_images:
293
- denoising_steps.append(
294
- wandb.Image(
295
- samples_np[0][0],
296
- caption=f"Step {num_timesteps - step_idx}/{num_timesteps}",
297
- )
298
- )
299
-
300
- images_to_log["denoising_process_epoch_" + str(epoch)] = denoising_steps
301
-
302
- # Log all images
303
- wandb.log(images_to_log)
304
-
305
- model.train()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
uv.lock CHANGED
@@ -2,8 +2,10 @@ version = 1
2
  revision = 1
3
  requires-python = ">=3.13"
4
  resolution-markers = [
5
- "sys_platform == 'linux'",
6
- "sys_platform != 'linux'",
 
 
7
  ]
8
 
9
  [[package]]
@@ -73,6 +75,7 @@ version = "0.1.0"
73
  source = { virtual = "." }
74
  dependencies = [
75
  { name = "numpy" },
 
76
  { name = "pandas" },
77
  { name = "torch" },
78
  { name = "torchvision" },
@@ -83,6 +86,7 @@ dependencies = [
83
  [package.metadata]
84
  requires-dist = [
85
  { name = "numpy", specifier = ">=2.2.4" },
 
86
  { name = "pandas", specifier = ">=2.2.3" },
87
  { name = "torch", specifier = ">=2.6.0" },
88
  { name = "torchvision", specifier = ">=0.21.0" },
@@ -276,7 +280,7 @@ name = "nvidia-cudnn-cu12"
276
  version = "9.1.0.70"
277
  source = { registry = "https://pypi.org/simple" }
278
  dependencies = [
279
- { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" },
280
  ]
281
  wheels = [
282
  { url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
@@ -287,7 +291,7 @@ name = "nvidia-cufft-cu12"
287
  version = "11.2.1.3"
288
  source = { registry = "https://pypi.org/simple" }
289
  dependencies = [
290
- { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
291
  ]
292
  wheels = [
293
  { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 },
@@ -306,9 +310,9 @@ name = "nvidia-cusolver-cu12"
306
  version = "11.6.1.9"
307
  source = { registry = "https://pypi.org/simple" }
308
  dependencies = [
309
- { name = "nvidia-cublas-cu12", marker = "sys_platform == 'linux'" },
310
- { name = "nvidia-cusparse-cu12", marker = "sys_platform == 'linux'" },
311
- { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
312
  ]
313
  wheels = [
314
  { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 },
@@ -319,7 +323,7 @@ name = "nvidia-cusparse-cu12"
319
  version = "12.3.1.170"
320
  source = { registry = "https://pypi.org/simple" }
321
  dependencies = [
322
- { name = "nvidia-nvjitlink-cu12", marker = "sys_platform == 'linux'" },
323
  ]
324
  wheels = [
325
  { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 },
@@ -357,6 +361,23 @@ wheels = [
357
  { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 },
358
  ]
359
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
360
  [[package]]
361
  name = "pandas"
362
  version = "2.2.3"
 
2
  revision = 1
3
  requires-python = ">=3.13"
4
  resolution-markers = [
5
+ "platform_machine == 'aarch64' and sys_platform == 'linux'",
6
+ "platform_machine != 'aarch64' and sys_platform == 'linux'",
7
+ "sys_platform == 'darwin'",
8
+ "sys_platform != 'darwin' and sys_platform != 'linux'",
9
  ]
10
 
11
  [[package]]
 
75
  source = { virtual = "." }
76
  dependencies = [
77
  { name = "numpy" },
78
+ { name = "opencv-python" },
79
  { name = "pandas" },
80
  { name = "torch" },
81
  { name = "torchvision" },
 
86
  [package.metadata]
87
  requires-dist = [
88
  { name = "numpy", specifier = ">=2.2.4" },
89
+ { name = "opencv-python", specifier = ">=4.11.0.86" },
90
  { name = "pandas", specifier = ">=2.2.3" },
91
  { name = "torch", specifier = ">=2.6.0" },
92
  { name = "torchvision", specifier = ">=0.21.0" },
 
280
  version = "9.1.0.70"
281
  source = { registry = "https://pypi.org/simple" }
282
  dependencies = [
283
+ { name = "nvidia-cublas-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" },
284
  ]
285
  wheels = [
286
  { url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
 
291
  version = "11.2.1.3"
292
  source = { registry = "https://pypi.org/simple" }
293
  dependencies = [
294
+ { name = "nvidia-nvjitlink-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" },
295
  ]
296
  wheels = [
297
  { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 },
 
310
  version = "11.6.1.9"
311
  source = { registry = "https://pypi.org/simple" }
312
  dependencies = [
313
+ { name = "nvidia-cublas-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" },
314
+ { name = "nvidia-cusparse-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" },
315
+ { name = "nvidia-nvjitlink-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" },
316
  ]
317
  wheels = [
318
  { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 },
 
323
  version = "12.3.1.170"
324
  source = { registry = "https://pypi.org/simple" }
325
  dependencies = [
326
+ { name = "nvidia-nvjitlink-cu12", marker = "platform_machine != 'aarch64' and sys_platform == 'linux'" },
327
  ]
328
  wheels = [
329
  { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 },
 
361
  { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 },
362
  ]
363
 
364
+ [[package]]
365
+ name = "opencv-python"
366
+ version = "4.11.0.86"
367
+ source = { registry = "https://pypi.org/simple" }
368
+ dependencies = [
369
+ { name = "numpy" },
370
+ ]
371
+ sdist = { url = "https://files.pythonhosted.org/packages/17/06/68c27a523103dad5837dc5b87e71285280c4f098c60e4fe8a8db6486ab09/opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4", size = 95171956 }
372
+ wheels = [
373
+ { url = "https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a", size = 37326322 },
374
+ { url = "https://files.pythonhosted.org/packages/3b/84/0a67490741867eacdfa37bc18df96e08a9d579583b419010d7f3da8ff503/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66", size = 56723197 },
375
+ { url = "https://files.pythonhosted.org/packages/f3/bd/29c126788da65c1fb2b5fb621b7fed0ed5f9122aa22a0868c5e2c15c6d23/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202", size = 42230439 },
376
+ { url = "https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d", size = 62986597 },
377
+ { url = "https://files.pythonhosted.org/packages/fb/d7/1d5941a9dde095468b288d989ff6539dd69cd429dbf1b9e839013d21b6f0/opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b", size = 29384337 },
378
+ { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044 },
379
+ ]
380
+
381
  [[package]]
382
  name = "pandas"
383
  version = "2.2.3"