jessehostetler committed on
Commit
82e6e64
·
1 Parent(s): 74e27de

Fix challenge-cli not setting storage when using --volume and friends. Improve error messages. Fix multiple issues with Dockerfile.gpu. Change make docker-run and make docker-run-gpu to use docker flags that match the Dyff cloud environment by default.

Browse files
Dockerfile.gpu CHANGED
@@ -1,25 +1,23 @@
1
- FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04
 
 
2
 
3
  # Install 'uv', which we will use to install Python dependencies
4
  COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
 
5
 
6
- ENV PYTHONDONTWRITEBYTECODE="1" \
7
- PYTHONUNBUFFERED="1" \
8
- DEBIAN_FRONTEND="noninteractive"
 
 
 
 
9
 
10
- # Install Python and create venv
11
- WORKDIR /app/
12
  RUN uv python install 3.12
13
  RUN uv venv
14
  RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel
15
 
16
- # Download models during build instead of copying from local
17
- COPY scripts/model_download.bash /tmp/model_download.bash
18
- RUN . .venv/bin/activate && \
19
- uv pip install --no-cache-dir huggingface-hub && \
20
- bash /tmp/model_download.bash && \
21
- rm /tmp/model_download.bash
22
-
23
  # Install CPU requirements
24
  COPY requirements.cpu.txt ./
25
  RUN uv pip install --no-cache-dir -r ./requirements.cpu.txt
@@ -28,9 +26,51 @@ RUN uv pip install --no-cache-dir -r ./requirements.cpu.txt
28
  COPY requirements.torch.gpu.txt ./
29
  RUN uv pip install --no-cache-dir -r ./requirements.torch.gpu.txt
30
 
 
 
 
 
 
 
 
31
  COPY app ./app
32
  COPY main.py ./
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  EXPOSE 8000
35
 
36
- ENTRYPOINT ["python3", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This Dockerfile is based on this post in the uv GitHub:
2
+ # https://github.com/astral-sh/uv/issues/7758#issuecomment-3263282018
3
+ FROM python:3.12-slim AS build
4
 
5
  # Install 'uv', which we will use to install Python dependencies
6
  COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
7
+ WORKDIR /app
8
 
9
+ # uv configuration
10
+ # Ref: https://docs.astral.sh/uv/guides/integration/docker/#compiling-bytecode
11
+ ENV UV_COMPILE_BYTECODE=1 \
12
+ # Ref: https://docs.astral.sh/uv/guides/integration/docker/#caching
13
+ UV_LINK_MODE=copy \
14
+ # Ref: https://docs.astral.sh/uv/guides/integration/docker/#managing-python-interpreters
15
+ UV_PYTHON_INSTALL_DIR=/opt/python
16
 
 
 
17
  RUN uv python install 3.12
18
  RUN uv venv
19
  RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel
20
 
 
 
 
 
 
 
 
21
  # Install CPU requirements
22
  COPY requirements.cpu.txt ./
23
  RUN uv pip install --no-cache-dir -r ./requirements.cpu.txt
 
26
  COPY requirements.torch.gpu.txt ./
27
  RUN uv pip install --no-cache-dir -r ./requirements.torch.gpu.txt
28
 
29
+ # Download models during build instead of copying from local
30
+ COPY scripts/model_download.bash /tmp/model_download.bash
31
+ RUN . .venv/bin/activate && \
32
+ uv pip install --no-cache-dir huggingface-hub && \
33
+ bash /tmp/model_download.bash && \
34
+ rm /tmp/model_download.bash
35
+
36
  COPY app ./app
37
  COPY main.py ./
38
 
39
+ # ----------------------------------------------------------------------------
40
+
41
+ FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04 AS run
42
+ WORKDIR /app
43
+
44
+ ENV PYTHONDONTWRITEBYTECODE="1" \
45
+ PYTHONUNBUFFERED="1" \
46
+ DEBIAN_FRONTEND="noninteractive"
47
+
48
+ # The container will run as a non-root user with unknown UID and GID, so we
49
+ # need to give everyone permission to all of the files
50
+ COPY --from=build --chmod=777 /opt/python /opt/python
51
+ COPY --from=build --chmod=777 /app /app
52
+
53
+ # Add python env to PATH and app folder to PYTHONPATH
54
+ ENV PATH="/app/.venv/bin:${PATH}" \
55
+ PYTHONPATH="/app"
56
+
57
  EXPOSE 8000
58
 
59
+ # torch.compile tries to cache things in /tmp/torchinductor_{username},
60
+ # which doesn't work because we don't have a user account
61
+ ENV TORCHINDUCTOR_CACHE_DIR="/tmp/torchinductor"
62
+
63
+ # COPY entrypoint.sh ./entrypoint.sh
64
+ # RUN chmod a+x /app/entrypoint.sh
65
+ # RUN chmod -R a+x /app/.venv/bin
66
+ # ENTRYPOINT ["./entrypoint.sh", ".venv/bin/python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
67
+
68
+ # ENV PATH="/app/.venv/bin:$PATH"
69
+ # ENTRYPOINT [".", "/app/.venv/bin/activate;", "python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
70
+
71
+ # ENTRYPOINT ["uv", "run", "--no-cache", "--no-sync", "python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
72
+
73
+ # RUN chmod a+x .venv/bin/python
74
+ # ENTRYPOINT ["/app/.venv/bin/python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
75
+
76
+ ENTRYPOINT ["python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
challenge-cli.py CHANGED
@@ -41,6 +41,25 @@ def _wait_for_status(
41
  raise AssertionError("timeout")
42
 
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  def _common_options(f):
45
  @click.option(
46
  "--account",
@@ -49,6 +68,13 @@ def _common_options(f):
49
  help="Your account ID",
50
  metavar="ID",
51
  )
 
 
 
 
 
 
 
52
  @functools.wraps(f)
53
  def wrapper(*args, **kwargs):
54
  return f(*args, **kwargs)
@@ -130,8 +156,9 @@ def upload_submission(
130
  artifact_id: str | None,
131
  model_id: str | None,
132
  gpu: bool,
 
133
  ) -> None:
134
- dyffapi = Client()
135
 
136
  # Upload the image
137
  if artifact_id is None:
@@ -164,15 +191,20 @@ def upload_submission(
164
  assert artifact is not None
165
 
166
  model: Model | None = None
 
167
  if model_id is None:
168
  if volume is not None:
169
  if volume_mount is None:
170
  raise click.UsageError("--volume-mount is required when --volume is used")
171
 
172
- click.echo("creating Model from local directory ...")
 
173
 
174
  model = dyffapi.models.create_from_volume(
175
- volume, name="model_volume", account=account, resources=ModelResources()
 
 
 
176
  )
177
  assert model is not None
178
  click.echo(f"model.id: \"{model.id}\"")
@@ -197,11 +229,22 @@ def upload_submission(
197
  model = dyffapi.models.get(model_id)
198
  if model is None:
199
  raise click.UsageError(f"--model={model_id}: model not found")
 
 
 
 
 
 
200
 
201
  # Create a runnable InferenceService
202
  if volume_mount is not None:
203
  if model is None:
204
  raise click.UsageError("--volume-mount requires --volume or --model")
 
 
 
 
 
205
  if not volume_mount.is_absolute():
206
  raise click.UsageError("--volume-mount must be an absolute path")
207
  volumeMounts=[
@@ -238,7 +281,7 @@ def upload_submission(
238
  runner=InferenceServiceRunner(
239
  kind=InferenceServiceRunnerKind.CONTAINER,
240
  imageRef=EntityIdentifier.of(artifact),
241
- resources=ModelResources(),
242
  volumeMounts=volumeMounts,
243
  accelerator=accelerator,
244
  ),
@@ -287,8 +330,8 @@ def upload_submission(
287
  help="The Challenge ID to submit to.",
288
  metavar="ID",
289
  )
290
- def submit(account: str, task_id: str, team_id: str, service_id: str, challenge_id: str) -> None:
291
- dyffapi = Client()
292
 
293
  challenge_tasks = {
294
  "dc509a8c771b492b90c43012fde9a04f": {
 
41
  raise AssertionError("timeout")
42
 
43
 
44
+ def _directory_size_bytes(directory: Path | str) -> int:
45
+ directory = Path(directory)
46
+ size: int = 0
47
+ for f in directory.rglob("*"):
48
+ if f.exists() and not f.is_symlink():
49
+ size += f.stat().st_size
50
+ return size
51
+
52
+
53
+ def _storage_for_size(size_bytes: int) -> str:
54
+ """Convert bytes to k8s Quantity and add some overhead in case the size
55
+ is different on different filesystems.
56
+ """
57
+ Gi = int(1024**3)
58
+ size_B_with_overhead = size_bytes + 2*Gi
59
+ size_Gi_with_overhead = size_B_with_overhead // Gi
60
+ return f"{size_Gi_with_overhead}Gi"
61
+
62
+
63
  def _common_options(f):
64
  @click.option(
65
  "--account",
 
68
  help="Your account ID",
69
  metavar="ID",
70
  )
71
+ @click.option(
72
+ "--timeout-seconds",
73
+ type=int,
74
+ default=120,
75
+ help="Timeout for Dyff client operations",
76
+ metavar="SECONDS",
77
+ )
78
  @functools.wraps(f)
79
  def wrapper(*args, **kwargs):
80
  return f(*args, **kwargs)
 
156
  artifact_id: str | None,
157
  model_id: str | None,
158
  gpu: bool,
159
+ timeout_seconds: int,
160
  ) -> None:
161
+ dyffapi = Client(timeout=httpx.Timeout(timeout_seconds))
162
 
163
  # Upload the image
164
  if artifact_id is None:
 
191
  assert artifact is not None
192
 
193
  model: Model | None = None
194
+ model_storage_quantity: str | None = None
195
  if model_id is None:
196
  if volume is not None:
197
  if volume_mount is None:
198
  raise click.UsageError("--volume-mount is required when --volume is used")
199
 
200
+ model_storage_quantity = _storage_for_size(_directory_size_bytes(volume))
201
+ click.echo(f"creating Model from local directory with storage={model_storage_quantity} ...")
202
 
203
  model = dyffapi.models.create_from_volume(
204
+ volume,
205
+ name="model_volume",
206
+ account=account,
207
+ resources=ModelResources(storage=model_storage_quantity),
208
  )
209
  assert model is not None
210
  click.echo(f"model.id: \"{model.id}\"")
 
229
  model = dyffapi.models.get(model_id)
230
  if model is None:
231
  raise click.UsageError(f"--model={model_id}: model not found")
232
+ model_storage_quantity = model.resources.storage
233
+ if model_storage_quantity is None:
234
+ raise click.UsageError(
235
+ f"--model={model_id}: model.resources.storage not set;"
236
+ " was this model created with the challenge-cli tool?"
237
+ )
238
 
239
  # Create a runnable InferenceService
240
  if volume_mount is not None:
241
  if model is None:
242
  raise click.UsageError("--volume-mount requires --volume or --model")
243
+ if model_storage_quantity is None:
244
+ raise click.UsageError(
245
+ f"model {model.id}: model.resources.storage not set;"
246
+ " was this model created with the challenge-cli tool?"
247
+ )
248
  if not volume_mount.is_absolute():
249
  raise click.UsageError("--volume-mount must be an absolute path")
250
  volumeMounts=[
 
281
  runner=InferenceServiceRunner(
282
  kind=InferenceServiceRunnerKind.CONTAINER,
283
  imageRef=EntityIdentifier.of(artifact),
284
+ resources=ModelResources(storage=model_storage_quantity),
285
  volumeMounts=volumeMounts,
286
  accelerator=accelerator,
287
  ),
 
330
  help="The Challenge ID to submit to.",
331
  metavar="ID",
332
  )
333
+ def submit(account: str, task_id: str, team_id: str, service_id: str, challenge_id: str, timeout_seconds: int) -> None:
334
+ dyffapi = Client(timeout=httpx.Timeout(timeout_seconds))
335
 
336
  challenge_tasks = {
337
  "dc509a8c771b492b90c43012fde9a04f": {
makefile CHANGED
@@ -23,15 +23,11 @@ docker-build:
23
 
24
  .PHONY: docker-run
25
  docker-run:
26
- docker run --rm -it -p 8000:8000 $(IMAGE)
27
 
28
  .PHONY: docker-test-no-internet
29
  docker-test-no-internet:
30
- docker run --rm -it --network none $(IMAGE)
31
-
32
- .PHONY: docker-test-readonly
33
- docker-test-readonly:
34
- docker run --rm -it --read-only --tmpfs "/tmp" $(IMAGE)
35
 
36
  .PHONY: compile
37
  compile:
@@ -48,6 +44,10 @@ compile-gpu:
48
  docker-build-gpu:
49
  docker build -t $(IMAGE)-gpu -f Dockerfile.gpu .
50
 
 
 
 
 
51
  requirements.cpu.txt: requirements.in requirements.torch.cpu.txt | $(VENV)
52
  uv pip compile --python-version 3.12 --upgrade -o requirements.cpu.txt requirements.cpu.in
53
 
 
23
 
24
  .PHONY: docker-run
25
  docker-run:
26
+ docker run --rm -it --read-only --tmpfs "/tmp" -u 1001:1001 -p 8000:8000 $(IMAGE)
27
 
28
  .PHONY: docker-test-no-internet
29
  docker-test-no-internet:
30
+ docker run --rm -it --read-only --tmpfs "/tmp" -u 1001:1001 -p 8000:8000 --network none $(IMAGE)
 
 
 
 
31
 
32
  .PHONY: compile
33
  compile:
 
44
  docker-build-gpu:
45
  docker build -t $(IMAGE)-gpu -f Dockerfile.gpu .
46
 
47
+ .PHONY: docker-run-gpu
48
+ docker-run-gpu:
49
+ docker run --rm -it --read-only --tmpfs "/tmp" -u 1001:1001 -p 8000:8000 $(IMAGE)-gpu
50
+
51
  requirements.cpu.txt: requirements.in requirements.torch.cpu.txt | $(VENV)
52
  uv pip compile --python-version 3.12 --upgrade -o requirements.cpu.txt requirements.cpu.in
53
 
requirements.cpu.txt CHANGED
@@ -6,7 +6,7 @@ annotated-doc==0.0.4
6
  # via fastapi
7
  annotated-types==0.7.0
8
  # via pydantic
9
- anyio==4.11.0
10
  # via
11
  # httpx
12
  # jupyter-server
@@ -20,17 +20,17 @@ arrow==1.4.0
20
  # via isoduration
21
  asttokens==3.0.1
22
  # via stack-data
23
- async-lru==2.0.5
24
  # via jupyterlab
25
  attrs==25.4.0
26
  # via
27
  # jsonschema
28
  # referencing
29
- azure-core==1.36.0
30
  # via dyff-client
31
  babel==2.17.0
32
  # via jupyterlab-server
33
- beautifulsoup4==4.14.2
34
  # via
35
  # dyff-audit
36
  # nbconvert
@@ -38,7 +38,7 @@ bleach==6.3.0
38
  # via nbconvert
39
  canonicaljson==2.0.0
40
  # via dyff-schema
41
- certifi==2025.11.12
42
  # via
43
  # httpcore
44
  # httpx
@@ -53,7 +53,7 @@ click==8.3.1
53
  # uvicorn
54
  comm==0.2.3
55
  # via ipykernel
56
- debugpy==1.8.17
57
  # via ipykernel
58
  decorator==5.2.1
59
  # via ipython
@@ -61,13 +61,13 @@ defusedxml==0.7.1
61
  # via nbconvert
62
  dnspython==2.8.0
63
  # via email-validator
64
- dyff-audit==0.16.1
65
  # via -r requirements.in
66
- dyff-client==0.23.5
67
  # via
68
  # -r requirements.in
69
  # dyff-audit
70
- dyff-schema==0.39.1
71
  # via
72
  # -r requirements.in
73
  # dyff-audit
@@ -76,17 +76,17 @@ email-validator==2.3.0
76
  # via dyff-schema
77
  executing==2.2.1
78
  # via stack-data
79
- fastapi==0.121.2
80
  # via -r requirements.in
81
  fastjsonschema==2.21.2
82
  # via nbformat
83
- filelock==3.20.0
84
  # via
85
  # huggingface-hub
86
  # transformers
87
  fqdn==1.5.1
88
  # via jsonschema
89
- fsspec==2025.10.0
90
  # via huggingface-hub
91
  google-i18n-address==3.1.1
92
  # via dyff-schema
@@ -108,7 +108,7 @@ huggingface-hub==0.36.0
108
  # via
109
  # tokenizers
110
  # transformers
111
- hypothesis==6.148.1
112
  # via
113
  # dyff-schema
114
  # hypothesis-jsonschema
@@ -123,7 +123,7 @@ idna==3.11
123
  # requests
124
  ipykernel==7.1.0
125
  # via jupyterlab
126
- ipython==9.7.0
127
  # via ipykernel
128
  ipython-pygments-lexers==1.1.1
129
  # via ipython
@@ -139,7 +139,7 @@ jinja2==3.1.6
139
  # jupyterlab
140
  # jupyterlab-server
141
  # nbconvert
142
- json5==0.12.1
143
  # via jupyterlab-server
144
  jsonpath-ng==1.7.0
145
  # via
@@ -147,7 +147,7 @@ jsonpath-ng==1.7.0
147
  # dyff-schema
148
  jsonpointer==3.0.0
149
  # via jsonschema
150
- jsonschema==4.25.1
151
  # via
152
  # hypothesis-jsonschema
153
  # jupyter-events
@@ -155,7 +155,7 @@ jsonschema==4.25.1
155
  # nbformat
156
  jsonschema-specifications==2025.9.1
157
  # via jsonschema
158
- jupyter-client==8.6.3
159
  # via
160
  # ipykernel
161
  # jupyter-server
@@ -180,9 +180,9 @@ jupyter-server==2.17.0
180
  # jupyterlab-server
181
  # notebook
182
  # notebook-shim
183
- jupyter-server-terminals==0.5.3
184
  # via jupyter-server
185
- jupyterlab==4.4.10
186
  # via notebook
187
  jupyterlab-pygments==0.3.0
188
  # via nbconvert
@@ -202,9 +202,9 @@ matplotlib-inline==0.2.1
202
  # via
203
  # ipykernel
204
  # ipython
205
- mistune==3.1.4
206
  # via nbconvert
207
- nbclient==0.10.2
208
  # via nbconvert
209
  nbconvert==7.16.6
210
  # via
@@ -218,7 +218,7 @@ nbformat==5.10.4
218
  # nbconvert
219
  nest-asyncio==1.6.0
220
  # via ipykernel
221
- notebook==7.4.7
222
  # via dyff-audit
223
  notebook-shim==0.2.4
224
  # via
@@ -232,7 +232,7 @@ numpy==1.26.4
232
  # dyff-schema
233
  # pandas
234
  # transformers
235
- packaging==25.0
236
  # via
237
  # huggingface-hub
238
  # ipykernel
@@ -242,7 +242,7 @@ packaging==25.0
242
  # jupyterlab-server
243
  # nbconvert
244
  # transformers
245
- pandas==2.3.3
246
  # via
247
  # -r requirements.in
248
  # dyff-audit
@@ -253,17 +253,17 @@ parso==0.8.5
253
  # via jedi
254
  pexpect==4.9.0
255
  # via ipython
256
- pillow==12.0.0
257
  # via -r requirements.in
258
- platformdirs==4.5.0
259
  # via jupyter-core
260
  ply==3.11
261
  # via jsonpath-ng
262
- prometheus-client==0.23.1
263
  # via jupyter-server
264
  prompt-toolkit==3.0.52
265
  # via ipython
266
- psutil==7.1.3
267
  # via ipykernel
268
  ptyprocess==0.7.0
269
  # via
@@ -271,15 +271,15 @@ ptyprocess==0.7.0
271
  # terminado
272
  pure-eval==0.2.3
273
  # via stack-data
274
- pyarrow==22.0.0
275
  # via
276
  # -r requirements.in
277
  # dyff-audit
278
  # dyff-client
279
  # dyff-schema
280
- pycparser==2.23
281
  # via cffi
282
- pydantic==2.12.4
283
  # via
284
  # -r requirements.in
285
  # dyff-audit
@@ -308,10 +308,8 @@ python-dotenv==1.2.1
308
  # uvicorn
309
  python-json-logger==4.0.0
310
  # via jupyter-events
311
- python-multipart==0.0.20
312
  # via -r requirements.in
313
- pytz==2025.2
314
- # via pandas
315
  pyyaml==6.0.3
316
  # via
317
  # huggingface-hub
@@ -328,7 +326,7 @@ referencing==0.37.0
328
  # jsonschema
329
  # jsonschema-specifications
330
  # jupyter-events
331
- regex==2025.11.3
332
  # via transformers
333
  requests==2.32.5
334
  # via
@@ -348,33 +346,29 @@ rfc3986-validator==0.1.1
348
  # jupyter-events
349
  rfc3987-syntax==1.1.0
350
  # via jsonschema
351
- rpds-py==0.29.0
352
  # via
353
  # jsonschema
354
  # referencing
355
- ruamel-yaml==0.18.16
356
  # via dyff-audit
357
- ruamel-yaml-clib==0.2.15
358
- # via ruamel-yaml
359
- safetensors==0.6.2
360
  # via transformers
361
- send2trash==1.8.3
362
  # via jupyter-server
363
- setuptools==80.9.0
364
  # via jupyterlab
365
  six==1.17.0
366
  # via
367
  # python-dateutil
368
  # rfc3339-validator
369
- sniffio==1.3.1
370
- # via anyio
371
  sortedcontainers==2.4.0
372
  # via hypothesis
373
- soupsieve==2.8
374
  # via beautifulsoup4
375
  stack-data==0.6.3
376
  # via ipython
377
- starlette==0.49.3
378
  # via fastapi
379
  terminado==0.18.1
380
  # via
@@ -382,9 +376,9 @@ terminado==0.18.1
382
  # jupyter-server-terminals
383
  tinycss2==1.4.0
384
  # via bleach
385
- tokenizers==0.22.1
386
  # via transformers
387
- tornado==6.5.2
388
  # via
389
  # ipykernel
390
  # jupyter-client
@@ -410,7 +404,7 @@ traitlets==5.14.3
410
  # nbclient
411
  # nbconvert
412
  # nbformat
413
- transformers==4.57.1
414
  # via -r requirements.in
415
  typing-extensions==4.15.0
416
  # via
@@ -428,21 +422,19 @@ typing-inspection==0.4.2
428
  # via
429
  # pydantic
430
  # pydantic-settings
431
- tzdata==2025.2
432
- # via
433
- # arrow
434
- # pandas
435
  uri-template==1.3.0
436
  # via jsonschema
437
- urllib3==2.5.0
438
  # via requests
439
- uvicorn==0.38.0
440
  # via -r requirements.in
441
  uvloop==0.22.1
442
  # via uvicorn
443
  watchfiles==1.1.1
444
  # via uvicorn
445
- wcwidth==0.2.14
446
  # via prompt-toolkit
447
  webcolors==25.10.0
448
  # via jsonschema
@@ -452,7 +444,7 @@ webencodings==0.5.1
452
  # tinycss2
453
  websocket-client==1.9.0
454
  # via jupyter-server
455
- websockets==15.0.1
456
  # via
457
  # dyff-client
458
  # uvicorn
 
6
  # via fastapi
7
  annotated-types==0.7.0
8
  # via pydantic
9
+ anyio==4.12.1
10
  # via
11
  # httpx
12
  # jupyter-server
 
20
  # via isoduration
21
  asttokens==3.0.1
22
  # via stack-data
23
+ async-lru==2.1.0
24
  # via jupyterlab
25
  attrs==25.4.0
26
  # via
27
  # jsonschema
28
  # referencing
29
+ azure-core==1.38.0
30
  # via dyff-client
31
  babel==2.17.0
32
  # via jupyterlab-server
33
+ beautifulsoup4==4.14.3
34
  # via
35
  # dyff-audit
36
  # nbconvert
 
38
  # via nbconvert
39
  canonicaljson==2.0.0
40
  # via dyff-schema
41
+ certifi==2026.1.4
42
  # via
43
  # httpcore
44
  # httpx
 
53
  # uvicorn
54
  comm==0.2.3
55
  # via ipykernel
56
+ debugpy==1.8.19
57
  # via ipykernel
58
  decorator==5.2.1
59
  # via ipython
 
61
  # via nbconvert
62
  dnspython==2.8.0
63
  # via email-validator
64
+ dyff-audit==0.16.8
65
  # via -r requirements.in
66
+ dyff-client==0.23.11
67
  # via
68
  # -r requirements.in
69
  # dyff-audit
70
+ dyff-schema==0.43.4
71
  # via
72
  # -r requirements.in
73
  # dyff-audit
 
76
  # via dyff-schema
77
  executing==2.2.1
78
  # via stack-data
79
+ fastapi==0.128.0
80
  # via -r requirements.in
81
  fastjsonschema==2.21.2
82
  # via nbformat
83
+ filelock==3.20.3
84
  # via
85
  # huggingface-hub
86
  # transformers
87
  fqdn==1.5.1
88
  # via jsonschema
89
+ fsspec==2026.1.0
90
  # via huggingface-hub
91
  google-i18n-address==3.1.1
92
  # via dyff-schema
 
108
  # via
109
  # tokenizers
110
  # transformers
111
+ hypothesis==6.150.2
112
  # via
113
  # dyff-schema
114
  # hypothesis-jsonschema
 
123
  # requests
124
  ipykernel==7.1.0
125
  # via jupyterlab
126
+ ipython==9.9.0
127
  # via ipykernel
128
  ipython-pygments-lexers==1.1.1
129
  # via ipython
 
139
  # jupyterlab
140
  # jupyterlab-server
141
  # nbconvert
142
+ json5==0.13.0
143
  # via jupyterlab-server
144
  jsonpath-ng==1.7.0
145
  # via
 
147
  # dyff-schema
148
  jsonpointer==3.0.0
149
  # via jsonschema
150
+ jsonschema==4.26.0
151
  # via
152
  # hypothesis-jsonschema
153
  # jupyter-events
 
155
  # nbformat
156
  jsonschema-specifications==2025.9.1
157
  # via jsonschema
158
+ jupyter-client==8.8.0
159
  # via
160
  # ipykernel
161
  # jupyter-server
 
180
  # jupyterlab-server
181
  # notebook
182
  # notebook-shim
183
+ jupyter-server-terminals==0.5.4
184
  # via jupyter-server
185
+ jupyterlab==4.5.2
186
  # via notebook
187
  jupyterlab-pygments==0.3.0
188
  # via nbconvert
 
202
  # via
203
  # ipykernel
204
  # ipython
205
+ mistune==3.2.0
206
  # via nbconvert
207
+ nbclient==0.10.4
208
  # via nbconvert
209
  nbconvert==7.16.6
210
  # via
 
218
  # nbconvert
219
  nest-asyncio==1.6.0
220
  # via ipykernel
221
+ notebook==7.5.2
222
  # via dyff-audit
223
  notebook-shim==0.2.4
224
  # via
 
232
  # dyff-schema
233
  # pandas
234
  # transformers
235
+ packaging==26.0
236
  # via
237
  # huggingface-hub
238
  # ipykernel
 
242
  # jupyterlab-server
243
  # nbconvert
244
  # transformers
245
+ pandas==3.0.0
246
  # via
247
  # -r requirements.in
248
  # dyff-audit
 
253
  # via jedi
254
  pexpect==4.9.0
255
  # via ipython
256
+ pillow==12.1.0
257
  # via -r requirements.in
258
+ platformdirs==4.5.1
259
  # via jupyter-core
260
  ply==3.11
261
  # via jsonpath-ng
262
+ prometheus-client==0.24.1
263
  # via jupyter-server
264
  prompt-toolkit==3.0.52
265
  # via ipython
266
+ psutil==7.2.1
267
  # via ipykernel
268
  ptyprocess==0.7.0
269
  # via
 
271
  # terminado
272
  pure-eval==0.2.3
273
  # via stack-data
274
+ pyarrow==23.0.0
275
  # via
276
  # -r requirements.in
277
  # dyff-audit
278
  # dyff-client
279
  # dyff-schema
280
+ pycparser==3.0
281
  # via cffi
282
+ pydantic==2.12.5
283
  # via
284
  # -r requirements.in
285
  # dyff-audit
 
308
  # uvicorn
309
  python-json-logger==4.0.0
310
  # via jupyter-events
311
+ python-multipart==0.0.21
312
  # via -r requirements.in
 
 
313
  pyyaml==6.0.3
314
  # via
315
  # huggingface-hub
 
326
  # jsonschema
327
  # jsonschema-specifications
328
  # jupyter-events
329
+ regex==2026.1.15
330
  # via transformers
331
  requests==2.32.5
332
  # via
 
346
  # jupyter-events
347
  rfc3987-syntax==1.1.0
348
  # via jsonschema
349
+ rpds-py==0.30.0
350
  # via
351
  # jsonschema
352
  # referencing
353
+ ruamel-yaml==0.19.1
354
  # via dyff-audit
355
+ safetensors==0.7.0
 
 
356
  # via transformers
357
+ send2trash==2.1.0
358
  # via jupyter-server
359
+ setuptools==80.10.1
360
  # via jupyterlab
361
  six==1.17.0
362
  # via
363
  # python-dateutil
364
  # rfc3339-validator
 
 
365
  sortedcontainers==2.4.0
366
  # via hypothesis
367
+ soupsieve==2.8.3
368
  # via beautifulsoup4
369
  stack-data==0.6.3
370
  # via ipython
371
+ starlette==0.50.0
372
  # via fastapi
373
  terminado==0.18.1
374
  # via
 
376
  # jupyter-server-terminals
377
  tinycss2==1.4.0
378
  # via bleach
379
+ tokenizers==0.22.2
380
  # via transformers
381
+ tornado==6.5.4
382
  # via
383
  # ipykernel
384
  # jupyter-client
 
404
  # nbclient
405
  # nbconvert
406
  # nbformat
407
+ transformers==4.57.6
408
  # via -r requirements.in
409
  typing-extensions==4.15.0
410
  # via
 
422
  # via
423
  # pydantic
424
  # pydantic-settings
425
+ tzdata==2025.3
426
+ # via arrow
 
 
427
  uri-template==1.3.0
428
  # via jsonschema
429
+ urllib3==2.6.3
430
  # via requests
431
+ uvicorn==0.40.0
432
  # via -r requirements.in
433
  uvloop==0.22.1
434
  # via uvicorn
435
  watchfiles==1.1.1
436
  # via uvicorn
437
+ wcwidth==0.3.0
438
  # via prompt-toolkit
439
  webcolors==25.10.0
440
  # via jsonschema
 
444
  # tinycss2
445
  websocket-client==1.9.0
446
  # via jupyter-server
447
+ websockets==16.0
448
  # via
449
  # dyff-client
450
  # uvicorn
requirements.in CHANGED
@@ -1,6 +1,6 @@
1
  dyff-audit
2
  dyff-client
3
- dyff-schema>=0.39.1
4
 
5
  click
6
 
 
1
  dyff-audit
2
  dyff-client
3
+ dyff-schema>=0.43.4
4
 
5
  click
6
 
requirements.torch.cpu.txt CHANGED
@@ -1,3 +1,3 @@
1
  --index-url https://download.pytorch.org/whl/cpu
2
- torch==2.9.1+cpu
3
- torchvision==0.24.1+cpu
 
1
  --index-url https://download.pytorch.org/whl/cpu
2
+ torch==2.10.0+cpu
3
+ torchvision==0.25.0+cpu
requirements.torch.gpu.txt CHANGED
@@ -1,8 +1,12 @@
1
  # This file was autogenerated by uv via the following command:
2
  # uv pip compile --python-version 3.12 -o requirements.torch.gpu.txt requirements.torch.gpu.in
3
- filelock==3.20.0
4
  # via torch
5
- fsspec==2025.10.0
 
 
 
 
6
  # via torch
7
  jinja2==3.1.6
8
  # via torch
@@ -10,9 +14,9 @@ markupsafe==3.0.3
10
  # via jinja2
11
  mpmath==1.3.0
12
  # via sympy
13
- networkx==3.6
14
  # via torch
15
- numpy==2.3.5
16
  # via torchvision
17
  nvidia-cublas-cu12==12.8.4.1
18
  # via
@@ -49,23 +53,23 @@ nvidia-nvjitlink-cu12==12.8.93
49
  # nvidia-cusolver-cu12
50
  # nvidia-cusparse-cu12
51
  # torch
52
- nvidia-nvshmem-cu12==3.3.20
53
  # via torch
54
  nvidia-nvtx-cu12==12.8.90
55
  # via torch
56
- pillow==12.0.0
57
  # via torchvision
58
- setuptools==80.9.0
59
  # via torch
60
  sympy==1.14.0
61
  # via torch
62
- torch==2.9.1
63
  # via
64
  # -r requirements.torch.gpu.in
65
  # torchvision
66
- torchvision==0.24.1
67
  # via -r requirements.torch.gpu.in
68
- triton==3.5.1
69
  # via torch
70
  typing-extensions==4.15.0
71
  # via torch
 
1
  # This file was autogenerated by uv via the following command:
2
  # uv pip compile --python-version 3.12 -o requirements.torch.gpu.txt requirements.torch.gpu.in
3
+ cuda-bindings==12.9.4
4
  # via torch
5
+ cuda-pathfinder==1.3.3
6
+ # via cuda-bindings
7
+ filelock==3.20.3
8
+ # via torch
9
+ fsspec==2026.1.0
10
  # via torch
11
  jinja2==3.1.6
12
  # via torch
 
14
  # via jinja2
15
  mpmath==1.3.0
16
  # via sympy
17
+ networkx==3.6.1
18
  # via torch
19
+ numpy==2.4.1
20
  # via torchvision
21
  nvidia-cublas-cu12==12.8.4.1
22
  # via
 
53
  # nvidia-cusolver-cu12
54
  # nvidia-cusparse-cu12
55
  # torch
56
+ nvidia-nvshmem-cu12==3.4.5
57
  # via torch
58
  nvidia-nvtx-cu12==12.8.90
59
  # via torch
60
+ pillow==12.1.0
61
  # via torchvision
62
+ setuptools==80.10.1
63
  # via torch
64
  sympy==1.14.0
65
  # via torch
66
+ torch==2.10.0
67
  # via
68
  # -r requirements.torch.gpu.in
69
  # torchvision
70
+ torchvision==0.25.0
71
  # via -r requirements.torch.gpu.in
72
+ triton==3.6.0
73
  # via torch
74
  typing-extensions==4.15.0
75
  # via torch