hysts HF Staff committed on
Commit
c99eddd
·
1 Parent(s): d0131ff
Files changed (7) hide show
  1. .pre-commit-config.yaml +4 -4
  2. .python-version +1 -1
  3. README.md +2 -1
  4. app.py +41 -22
  5. pyproject.toml +17 -8
  6. requirements.txt +149 -91
  7. uv.lock +0 -0
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
1
  repos:
2
  - repo: https://github.com/pre-commit/pre-commit-hooks
3
- rev: v5.0.0
4
  hooks:
5
  - id: check-executables-have-shebangs
6
  - id: check-json
@@ -14,13 +14,13 @@ repos:
14
  - id: requirements-txt-fixer
15
  - id: trailing-whitespace
16
  - repo: https://github.com/astral-sh/ruff-pre-commit
17
- rev: v0.9.9
18
  hooks:
19
- - id: ruff
20
  args: ["--fix"]
21
  - id: ruff-format
22
  - repo: https://github.com/pre-commit/mirrors-mypy
23
- rev: v1.15.0
24
  hooks:
25
  - id: mypy
26
  args: ["--ignore-missing-imports"]
 
1
  repos:
2
  - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v6.0.0
4
  hooks:
5
  - id: check-executables-have-shebangs
6
  - id: check-json
 
14
  - id: requirements-txt-fixer
15
  - id: trailing-whitespace
16
  - repo: https://github.com/astral-sh/ruff-pre-commit
17
+ rev: v0.15.4
18
  hooks:
19
+ - id: ruff-check
20
  args: ["--fix"]
21
  - id: ruff-format
22
  - repo: https://github.com/pre-commit/mirrors-mypy
23
+ rev: v1.19.1
24
  hooks:
25
  - id: mypy
26
  args: ["--ignore-missing-imports"]
.python-version CHANGED
@@ -1 +1 @@
1
- 3.10
 
1
+ 3.12
README.md CHANGED
@@ -4,7 +4,8 @@ emoji: 🔥
4
  colorFrom: blue
5
  colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 5.40.0
 
8
  app_file: app.py
9
  pinned: false
10
  ---
 
4
  colorFrom: blue
5
  colorTo: yellow
6
  sdk: gradio
7
+ sdk_version: 6.8.0
8
+ python_version: "3.12"
9
  app_file: app.py
10
  pinned: false
11
  ---
app.py CHANGED
@@ -12,12 +12,12 @@ import spaces
12
  import torch
13
  from loguru import logger
14
  from PIL import Image
15
- from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
16
 
17
- model_id = os.getenv("MODEL_ID", "google/gemma-3-12b-it")
18
- processor = AutoProcessor.from_pretrained(model_id, padding_side="left")
19
  model = Gemma3ForConditionalGeneration.from_pretrained(
20
- model_id, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager"
21
  )
22
 
23
  MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "5"))
@@ -164,6 +164,39 @@ def process_history(history: list[dict]) -> list[dict]:
164
 
165
 
166
  @spaces.GPU(duration=120)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  def run(message: dict, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512) -> Iterator[str]:
168
  if not validate_media_constraints(message, history):
169
  yield ""
@@ -181,22 +214,9 @@ def run(message: dict, history: list[dict], system_prompt: str = "", max_new_tok
181
  tokenize=True,
182
  return_dict=True,
183
  return_tensors="pt",
184
- ).to(device=model.device, dtype=torch.bfloat16)
185
-
186
- streamer = TextIteratorStreamer(processor, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
187
- generate_kwargs = dict(
188
- inputs,
189
- streamer=streamer,
190
- max_new_tokens=max_new_tokens,
191
- disable_compile=True,
192
  )
193
- t = Thread(target=model.generate, kwargs=generate_kwargs)
194
- t.start()
195
 
196
- output = ""
197
- for delta in streamer:
198
- output += delta
199
- yield output
200
 
201
 
202
  examples = [
@@ -330,8 +350,7 @@ You can upload images, interleaved images and videos. Note that video input only
330
 
331
  demo = gr.ChatInterface(
332
  fn=run,
333
- type="messages",
334
- chatbot=gr.Chatbot(type="messages", scale=1, allow_tags=["image"]),
335
  textbox=gr.MultimodalTextbox(file_types=["image", ".mp4"], file_count="multiple", autofocus=True),
336
  multimodal=True,
337
  additional_inputs=[
@@ -344,9 +363,9 @@ demo = gr.ChatInterface(
344
  examples=examples,
345
  run_examples_on_click=False,
346
  cache_examples=False,
347
- css_paths="style.css",
348
  delete_cache=(1800, 1800),
 
349
  )
350
 
351
  if __name__ == "__main__":
352
- demo.launch()
 
12
  import torch
13
  from loguru import logger
14
  from PIL import Image
15
+ from transformers import AutoProcessor, BatchFeature, Gemma3ForConditionalGeneration, TextIteratorStreamer
16
 
17
+ MODEL_ID = os.getenv("MODEL_ID", "google/gemma-3-12b-it")
18
+ processor = AutoProcessor.from_pretrained(MODEL_ID, padding_side="left")
19
  model = Gemma3ForConditionalGeneration.from_pretrained(
20
+ MODEL_ID, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager"
21
  )
22
 
23
  MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "5"))
 
164
 
165
 
166
  @spaces.GPU(duration=120)
167
+ def _run_on_gpu(inputs: BatchFeature, max_new_tokens: int) -> Iterator[str]:
168
+ inputs = inputs.to(device=model.device, dtype=torch.bfloat16)
169
+
170
+ streamer = TextIteratorStreamer(processor, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
171
+ generate_kwargs = {
172
+ **inputs,
173
+ "streamer": streamer,
174
+ "max_new_tokens": max_new_tokens,
175
+ "disable_compile": True,
176
+ }
177
+
178
+ exception_holder: list[Exception] = []
179
+
180
+ def _generate() -> None:
181
+ try:
182
+ model.generate(**generate_kwargs)
183
+ except Exception as e: # noqa: BLE001
184
+ exception_holder.append(e)
185
+
186
+ thread = Thread(target=_generate)
187
+ thread.start()
188
+
189
+ chunks: list[str] = []
190
+ for text in streamer:
191
+ chunks.append(text)
192
+ yield "".join(chunks)
193
+
194
+ thread.join()
195
+ if exception_holder:
196
+ msg = f"Generation failed: {exception_holder[0]}"
197
+ raise gr.Error(msg)
198
+
199
+
200
  def run(message: dict, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512) -> Iterator[str]:
201
  if not validate_media_constraints(message, history):
202
  yield ""
 
214
  tokenize=True,
215
  return_dict=True,
216
  return_tensors="pt",
 
 
 
 
 
 
 
 
217
  )
 
 
218
 
219
+ yield from _run_on_gpu(inputs=inputs, max_new_tokens=max_new_tokens)
 
 
 
220
 
221
 
222
  examples = [
 
350
 
351
  demo = gr.ChatInterface(
352
  fn=run,
353
+ chatbot=gr.Chatbot(scale=1, allow_tags=["image"]),
 
354
  textbox=gr.MultimodalTextbox(file_types=["image", ".mp4"], file_count="multiple", autofocus=True),
355
  multimodal=True,
356
  additional_inputs=[
 
363
  examples=examples,
364
  run_examples_on_click=False,
365
  cache_examples=False,
 
366
  delete_cache=(1800, 1800),
367
+ fill_height=True,
368
  )
369
 
370
  if __name__ == "__main__":
371
+ demo.launch(css_paths="style.css")
pyproject.toml CHANGED
@@ -3,18 +3,17 @@ name = "gemma-3-12b-it"
3
  version = "0.1.0"
4
  description = ""
5
  readme = "README.md"
6
- requires-python = ">=3.10"
7
  dependencies = [
8
- "accelerate>=1.9.0",
9
- "gradio>=5.40.0",
10
- "hf-transfer>=0.1.9",
11
  "loguru>=0.7.3",
12
  "opencv-python-headless>=4.11.0.86",
13
  "protobuf>=6.30.0",
14
  "sentencepiece>=0.2.0",
15
- "spaces>=0.39.0",
16
- "torch==2.4.0",
17
- "transformers==4.51.3",
18
  ]
19
 
20
  [tool.ruff]
@@ -39,7 +38,7 @@ ignore = [
39
  "EM101", # raw-string-in-exception
40
  "FBT001", # boolean-type-hint-positional-argument
41
  "FBT002", # boolean-default-value-positional-argument
42
- "PD901", # pandas-df-variable-name
43
  "PGH003", # blanket-type-ignore
44
  "PLR0913", # too-many-arguments
45
  "PLR0915", # too-many-statements
@@ -57,3 +56,13 @@ convention = "google"
57
 
58
  [tool.ruff.format]
59
  docstring-code-format = true
 
 
 
 
 
 
 
 
 
 
 
3
  version = "0.1.0"
4
  description = ""
5
  readme = "README.md"
6
+ requires-python = ">=3.12"
7
  dependencies = [
8
+ "accelerate>=1.12.0",
9
+ "gradio>=6.8.0",
 
10
  "loguru>=0.7.3",
11
  "opencv-python-headless>=4.11.0.86",
12
  "protobuf>=6.30.0",
13
  "sentencepiece>=0.2.0",
14
+ "spaces>=0.47.0",
15
+ "torch==2.9.1",
16
+ "transformers>=5.2.0",
17
  ]
18
 
19
  [tool.ruff]
 
38
  "EM101", # raw-string-in-exception
39
  "FBT001", # boolean-type-hint-positional-argument
40
  "FBT002", # boolean-default-value-positional-argument
41
+ "ISC001", # single-line-implicit-string-concatenation
42
  "PGH003", # blanket-type-ignore
43
  "PLR0913", # too-many-arguments
44
  "PLR0915", # too-many-statements
 
56
 
57
  [tool.ruff.format]
58
  docstring-code-format = true
59
+
60
+ [dependency-groups]
61
+ dev = [
62
+ "mypy>=1.19.1",
63
+ "pre-commit>=4.5.1",
64
+ "ruff>=0.15.4",
65
+ ]
66
+ hf-spaces = [
67
+ "datasets",
68
+ ]
requirements.txt CHANGED
@@ -1,51 +1,77 @@
1
  # This file was autogenerated by uv via the following command:
2
- # uv pip compile pyproject.toml -o requirements.txt
3
- accelerate==1.9.0
4
- # via gemma-3-12b-it (pyproject.toml)
5
  aiofiles==24.1.0
6
  # via gradio
 
 
 
 
 
 
 
 
 
 
7
  annotated-types==0.7.0
8
  # via pydantic
9
- anyio==4.10.0
10
  # via
11
  # gradio
12
  # httpx
13
  # starlette
14
- brotli==1.1.0
 
 
15
  # via gradio
16
- certifi==2025.8.3
 
 
17
  # via
18
  # httpcore
19
  # httpx
20
  # requests
21
- charset-normalizer==3.4.2
22
  # via requests
23
- click==8.2.1
24
  # via
25
  # typer
26
  # uvicorn
27
- exceptiongroup==1.3.0
28
- # via anyio
29
- fastapi==0.116.1
 
 
 
 
 
 
 
 
30
  # via gradio
31
- ffmpy==0.6.1
32
  # via gradio
33
- filelock==3.18.0
34
  # via
 
35
  # huggingface-hub
36
  # torch
37
- # transformers
38
- # triton
39
- fsspec==2025.7.0
40
  # via
 
 
 
 
 
41
  # gradio-client
42
  # huggingface-hub
43
  # torch
44
- gradio==5.40.0
45
  # via
46
- # gemma-3-12b-it (pyproject.toml)
47
  # spaces
48
- gradio-client==1.11.0
49
  # via gradio
50
  groovy==0.1.2
51
  # via gradio
@@ -53,39 +79,41 @@ h11==0.16.0
53
  # via
54
  # httpcore
55
  # uvicorn
56
- hf-transfer==0.1.9
57
- # via gemma-3-12b-it (pyproject.toml)
58
- hf-xet==1.1.5
59
  # via huggingface-hub
60
  httpcore==1.0.9
61
  # via httpx
62
  httpx==0.28.1
63
  # via
 
64
  # gradio
65
  # gradio-client
 
66
  # safehttpx
67
  # spaces
68
- huggingface-hub==0.34.3
69
  # via
70
  # accelerate
 
71
  # gradio
72
  # gradio-client
73
  # tokenizers
74
  # transformers
75
- idna==3.10
76
  # via
77
  # anyio
78
  # httpx
79
  # requests
 
80
  jinja2==3.1.6
81
  # via
82
  # gradio
83
  # torch
84
  loguru==0.7.3
85
- # via gemma-3-12b-it (pyproject.toml)
86
- markdown-it-py==3.0.0
87
  # via rich
88
- markupsafe==3.0.2
89
  # via
90
  # gradio
91
  # jinja2
@@ -93,74 +121,98 @@ mdurl==0.1.2
93
  # via markdown-it-py
94
  mpmath==1.3.0
95
  # via sympy
96
- networkx==3.4.2
 
 
 
 
 
 
97
  # via torch
98
- numpy==2.2.6
99
  # via
100
  # accelerate
 
101
  # gradio
102
  # opencv-python-headless
103
  # pandas
104
  # transformers
105
- nvidia-cublas-cu12==12.1.3.1
106
  # via
107
  # nvidia-cudnn-cu12
108
  # nvidia-cusolver-cu12
109
  # torch
110
- nvidia-cuda-cupti-cu12==12.1.105
 
 
111
  # via torch
112
- nvidia-cuda-nvrtc-cu12==12.1.105
113
  # via torch
114
- nvidia-cuda-runtime-cu12==12.1.105
115
  # via torch
116
- nvidia-cudnn-cu12==9.1.0.70
117
  # via torch
118
- nvidia-cufft-cu12==11.0.2.54
119
  # via torch
120
- nvidia-curand-cu12==10.3.2.106
121
  # via torch
122
- nvidia-cusolver-cu12==11.4.5.107
123
  # via torch
124
- nvidia-cusparse-cu12==12.1.0.106
125
  # via
126
  # nvidia-cusolver-cu12
127
  # torch
128
- nvidia-nccl-cu12==2.20.5
129
  # via torch
130
- nvidia-nvjitlink-cu12==12.9.86
 
 
131
  # via
 
132
  # nvidia-cusolver-cu12
133
  # nvidia-cusparse-cu12
134
- nvidia-nvtx-cu12==12.1.105
 
135
  # via torch
136
- opencv-python-headless==4.12.0.88
137
- # via gemma-3-12b-it (pyproject.toml)
138
- orjson==3.11.1
 
 
139
  # via gradio
140
- packaging==25.0
141
  # via
142
  # accelerate
 
143
  # gradio
144
  # gradio-client
145
  # huggingface-hub
146
  # spaces
147
  # transformers
148
- pandas==2.3.1
149
- # via gradio
150
- pillow==11.3.0
 
 
151
  # via gradio
152
- protobuf==6.31.1
153
- # via gemma-3-12b-it (pyproject.toml)
 
 
 
 
154
  psutil==5.9.8
155
  # via
156
  # accelerate
157
  # spaces
158
- pydantic==2.11.7
 
 
159
  # via
160
  # fastapi
161
  # gradio
162
  # spaces
163
- pydantic-core==2.33.2
164
  # via pydantic
165
  pydub==0.25.1
166
  # via gradio
@@ -168,73 +220,75 @@ pygments==2.19.2
168
  # via rich
169
  python-dateutil==2.9.0.post0
170
  # via pandas
171
- python-multipart==0.0.20
172
  # via gradio
173
  pytz==2025.2
174
- # via pandas
175
- pyyaml==6.0.2
176
  # via
177
  # accelerate
 
178
  # gradio
179
  # huggingface-hub
180
  # transformers
181
- regex==2025.7.34
182
  # via transformers
183
- requests==2.32.4
184
  # via
185
- # huggingface-hub
186
  # spaces
187
- # transformers
188
- rich==14.1.0
189
  # via typer
190
- ruff==0.12.7
191
  # via gradio
192
- safehttpx==0.1.6
193
- # via gradio
194
- safetensors==0.5.3
195
  # via
196
  # accelerate
197
  # transformers
198
  semantic-version==2.10.0
199
  # via gradio
200
- sentencepiece==0.2.0
201
- # via gemma-3-12b-it (pyproject.toml)
 
 
202
  shellingham==1.5.4
203
  # via typer
204
  six==1.17.0
205
  # via python-dateutil
206
- sniffio==1.3.1
207
- # via anyio
208
- spaces==0.39.0
209
- # via gemma-3-12b-it (pyproject.toml)
210
- starlette==0.47.2
211
  # via
212
  # fastapi
213
  # gradio
214
  sympy==1.14.0
215
  # via torch
216
- tokenizers==0.21.4
217
  # via transformers
218
  tomlkit==0.13.3
219
  # via gradio
220
- torch==2.4.0
221
  # via
222
- # gemma-3-12b-it (pyproject.toml)
223
  # accelerate
224
- tqdm==4.67.1
 
225
  # via
 
226
  # huggingface-hub
227
  # transformers
228
- transformers==4.51.3
229
- # via gemma-3-12b-it (pyproject.toml)
230
- triton==3.0.0
231
  # via torch
232
- typer==0.16.0
233
- # via gradio
234
- typing-extensions==4.14.1
235
  # via
 
 
 
 
 
 
236
  # anyio
237
- # exceptiongroup
238
  # fastapi
239
  # gradio
240
  # gradio-client
@@ -244,16 +298,20 @@ typing-extensions==4.14.1
244
  # spaces
245
  # starlette
246
  # torch
247
- # typer
248
  # typing-inspection
249
- # uvicorn
250
- typing-inspection==0.4.1
251
- # via pydantic
252
- tzdata==2025.2
 
253
  # via pandas
254
- urllib3==2.5.0
255
  # via requests
256
- uvicorn==0.35.0
257
  # via gradio
258
- websockets==15.0.1
259
- # via gradio-client
 
 
 
 
 
1
  # This file was autogenerated by uv via the following command:
2
+ # uv export --no-hashes --no-dev --group hf-spaces --no-emit-package typer-slim -o requirements.txt
3
+ accelerate==1.12.0
4
+ # via gemma-3-12b-it
5
  aiofiles==24.1.0
6
  # via gradio
7
+ aiohappyeyeballs==2.6.1
8
+ # via aiohttp
9
+ aiohttp==3.13.3
10
+ # via fsspec
11
+ aiosignal==1.4.0
12
+ # via aiohttp
13
+ annotated-doc==0.0.4
14
+ # via
15
+ # fastapi
16
+ # typer
17
  annotated-types==0.7.0
18
  # via pydantic
19
+ anyio==4.12.1
20
  # via
21
  # gradio
22
  # httpx
23
  # starlette
24
+ attrs==25.4.0
25
+ # via aiohttp
26
+ audioop-lts==0.2.2 ; python_full_version >= '3.13'
27
  # via gradio
28
+ brotli==1.2.0
29
+ # via gradio
30
+ certifi==2026.2.25
31
  # via
32
  # httpcore
33
  # httpx
34
  # requests
35
+ charset-normalizer==3.4.4
36
  # via requests
37
+ click==8.3.1
38
  # via
39
  # typer
40
  # uvicorn
41
+ colorama==0.4.6 ; sys_platform == 'win32'
42
+ # via
43
+ # click
44
+ # loguru
45
+ # tqdm
46
+ datasets==4.6.1
47
+ dill==0.4.0
48
+ # via
49
+ # datasets
50
+ # multiprocess
51
+ fastapi==0.135.1
52
  # via gradio
53
+ ffmpy==1.0.0
54
  # via gradio
55
+ filelock==3.25.0
56
  # via
57
+ # datasets
58
  # huggingface-hub
59
  # torch
60
+ frozenlist==1.8.0
 
 
61
  # via
62
+ # aiohttp
63
+ # aiosignal
64
+ fsspec==2026.2.0
65
+ # via
66
+ # datasets
67
  # gradio-client
68
  # huggingface-hub
69
  # torch
70
+ gradio==6.8.0
71
  # via
72
+ # gemma-3-12b-it
73
  # spaces
74
+ gradio-client==2.2.0
75
  # via gradio
76
  groovy==0.1.2
77
  # via gradio
 
79
  # via
80
  # httpcore
81
  # uvicorn
82
+ hf-xet==1.3.2 ; platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
 
 
83
  # via huggingface-hub
84
  httpcore==1.0.9
85
  # via httpx
86
  httpx==0.28.1
87
  # via
88
+ # datasets
89
  # gradio
90
  # gradio-client
91
+ # huggingface-hub
92
  # safehttpx
93
  # spaces
94
+ huggingface-hub==1.5.0
95
  # via
96
  # accelerate
97
+ # datasets
98
  # gradio
99
  # gradio-client
100
  # tokenizers
101
  # transformers
102
+ idna==3.11
103
  # via
104
  # anyio
105
  # httpx
106
  # requests
107
+ # yarl
108
  jinja2==3.1.6
109
  # via
110
  # gradio
111
  # torch
112
  loguru==0.7.3
113
+ # via gemma-3-12b-it
114
+ markdown-it-py==4.0.0
115
  # via rich
116
+ markupsafe==3.0.3
117
  # via
118
  # gradio
119
  # jinja2
 
121
  # via markdown-it-py
122
  mpmath==1.3.0
123
  # via sympy
124
+ multidict==6.7.1
125
+ # via
126
+ # aiohttp
127
+ # yarl
128
+ multiprocess==0.70.18
129
+ # via datasets
130
+ networkx==3.6.1
131
  # via torch
132
+ numpy==2.4.2
133
  # via
134
  # accelerate
135
+ # datasets
136
  # gradio
137
  # opencv-python-headless
138
  # pandas
139
  # transformers
140
+ nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
141
  # via
142
  # nvidia-cudnn-cu12
143
  # nvidia-cusolver-cu12
144
  # torch
145
+ nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
146
+ # via torch
147
+ nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
148
  # via torch
149
+ nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
150
  # via torch
151
+ nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux'
152
  # via torch
153
+ nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux'
154
  # via torch
155
+ nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
156
  # via torch
157
+ nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
158
  # via torch
159
+ nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
160
  # via torch
161
+ nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
162
  # via
163
  # nvidia-cusolver-cu12
164
  # torch
165
+ nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
166
  # via torch
167
+ nvidia-nccl-cu12==2.27.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
168
+ # via torch
169
+ nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
170
  # via
171
+ # nvidia-cufft-cu12
172
  # nvidia-cusolver-cu12
173
  # nvidia-cusparse-cu12
174
+ # torch
175
+ nvidia-nvshmem-cu12==3.3.20 ; platform_machine == 'x86_64' and sys_platform == 'linux'
176
  # via torch
177
+ nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
178
+ # via torch
179
+ opencv-python-headless==4.13.0.92
180
+ # via gemma-3-12b-it
181
+ orjson==3.11.7
182
  # via gradio
183
+ packaging==26.0
184
  # via
185
  # accelerate
186
+ # datasets
187
  # gradio
188
  # gradio-client
189
  # huggingface-hub
190
  # spaces
191
  # transformers
192
+ pandas==3.0.1
193
+ # via
194
+ # datasets
195
+ # gradio
196
+ pillow==12.1.1
197
  # via gradio
198
+ propcache==0.4.1
199
+ # via
200
+ # aiohttp
201
+ # yarl
202
+ protobuf==7.34.0
203
+ # via gemma-3-12b-it
204
  psutil==5.9.8
205
  # via
206
  # accelerate
207
  # spaces
208
+ pyarrow==23.0.1
209
+ # via datasets
210
+ pydantic==2.12.5
211
  # via
212
  # fastapi
213
  # gradio
214
  # spaces
215
+ pydantic-core==2.41.5
216
  # via pydantic
217
  pydub==0.25.1
218
  # via gradio
 
220
  # via rich
221
  python-dateutil==2.9.0.post0
222
  # via pandas
223
+ python-multipart==0.0.22
224
  # via gradio
225
  pytz==2025.2
226
+ # via gradio
227
+ pyyaml==6.0.3
228
  # via
229
  # accelerate
230
+ # datasets
231
  # gradio
232
  # huggingface-hub
233
  # transformers
234
+ regex==2026.2.28
235
  # via transformers
236
+ requests==2.32.5
237
  # via
238
+ # datasets
239
  # spaces
240
+ rich==14.3.3
 
241
  # via typer
242
+ safehttpx==0.1.7
243
  # via gradio
244
+ safetensors==0.7.0
 
 
245
  # via
246
  # accelerate
247
  # transformers
248
  semantic-version==2.10.0
249
  # via gradio
250
+ sentencepiece==0.2.1
251
+ # via gemma-3-12b-it
252
+ setuptools==82.0.0
253
+ # via torch
254
  shellingham==1.5.4
255
  # via typer
256
  six==1.17.0
257
  # via python-dateutil
258
+ spaces==0.47.0
259
+ # via gemma-3-12b-it
260
+ starlette==0.52.1
 
 
261
  # via
262
  # fastapi
263
  # gradio
264
  sympy==1.14.0
265
  # via torch
266
+ tokenizers==0.22.2
267
  # via transformers
268
  tomlkit==0.13.3
269
  # via gradio
270
+ torch==2.9.1
271
  # via
 
272
  # accelerate
273
+ # gemma-3-12b-it
274
+ tqdm==4.67.3
275
  # via
276
+ # datasets
277
  # huggingface-hub
278
  # transformers
279
+ transformers==5.2.0
280
+ # via gemma-3-12b-it
281
+ triton==3.5.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
282
  # via torch
283
+ typer==0.24.1
 
 
284
  # via
285
+ # gradio
286
+ # huggingface-hub
287
+ # typer-slim
288
+ typing-extensions==4.15.0
289
+ # via
290
+ # aiosignal
291
  # anyio
 
292
  # fastapi
293
  # gradio
294
  # gradio-client
 
298
  # spaces
299
  # starlette
300
  # torch
 
301
  # typing-inspection
302
+ typing-inspection==0.4.2
303
+ # via
304
+ # fastapi
305
+ # pydantic
306
+ tzdata==2025.3 ; sys_platform == 'emscripten' or sys_platform == 'win32'
307
  # via pandas
308
+ urllib3==2.6.3
309
  # via requests
310
+ uvicorn==0.41.0
311
  # via gradio
312
+ win32-setctime==1.2.0 ; sys_platform == 'win32'
313
+ # via loguru
314
+ xxhash==3.6.0
315
+ # via datasets
316
+ yarl==1.23.0
317
+ # via aiohttp
uv.lock CHANGED
The diff for this file is too large to render. See raw diff