Graf-J committed on
Commit
2959f55
·
verified ·
1 Parent(s): b32cabd

Initial Commit

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Nunito.ttf filter=lfs diff=lfs merge=lfs -text
Dockerfile CHANGED
@@ -1,20 +1,20 @@
1
- FROM python:3.13.5-slim
2
-
3
- WORKDIR /app
4
-
5
- RUN apt-get update && apt-get install -y \
6
- build-essential \
7
- curl \
8
- git \
9
- && rm -rf /var/lib/apt/lists/*
10
-
11
- COPY requirements.txt ./
12
- COPY src/ ./src/
13
-
14
- RUN pip3 install -r requirements.txt
15
-
16
- EXPOSE 8501
17
-
18
- HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
19
-
20
- ENTRYPOINT ["streamlit", "run", "src/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
 
1
FROM python:3.13.5-slim

WORKDIR /app

# Build tools for packages with native extensions; curl is needed by the HEALTHCHECK.
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*

# Install dependencies first so this layer is cached across code-only changes;
# --no-cache-dir keeps the image smaller.
COPY requirements.txt ./
RUN pip3 install --no-cache-dir -r requirements.txt

# Application code plus the runtime assets app.py reads:
# ImageCaptcha(fonts=["Nunito.ttf"]) and glob.glob("images/*") fail without these.
COPY app.py ./
COPY Nunito.ttf ./
COPY images/ ./images/

EXPOSE 8501

# curl --fail exits with codes > 1 on failure; Docker reserves those, so map to 1.
HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health || exit 1

ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
Nunito.ttf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2a6ab02dcefcf4c7481e92ffb49ad0c7bc7a19ccd18eb5d7d9f4e21211998c6
3
+ size 275644
app.py ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import time
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import torchvision.transforms as transforms
6
+ import torchvision.transforms.functional as F_vision
7
+ from PIL import Image
8
+ from transformers import AutoModel, AutoProcessor, pipeline
9
+ from captcha.image import ImageCaptcha
10
+ import io
11
+ import matplotlib.pyplot as plt
12
+ import numpy as np
13
+ from typing import Optional
14
+ from st_keyup import st_keyup
15
+
16
# Global page configuration must run before any other Streamlit call.
st.set_page_config(page_title="CAPTCHA Model Showcase", layout="wide")

st.title("CAPTCHA Models Showcase")

INTRO_TEXT = """
Explore generation with various text and augmentations, or test the models with your own images!
"""
st.markdown(INTRO_TEXT)
23
+
24
# --- Models configuration ---
@st.cache_resource
def load_finetuned_models():
    """Return static metadata for the fine-tuned models shown in Section 1.

    Cached once per process via st.cache_resource; keys are HF model ids,
    values are display-ready metric tables.
    """
    conv_transformer = {
        "Architecture": "Convolutional Transformer",
        "Training Data": "hammer888/captcha-data",
        "Python Captcha Library": "Included",
        "Parameters": "12,279,551",
        "Model Size": "51.7 MB",
        "Sequence Accuracy (Python Captcha)": "88.42%",
        "CER (Python Captcha)": "2.08%",
        "Link": "https://huggingface.co/Graf-J/captcha-conv-transformer-finetuned",
    }
    crnn = {
        "Architecture": "CRNN",
        "Training Data": "hammer888/captcha-data",
        "Python Captcha Library": "Included",
        "Parameters": "3,570,943",
        "Model Size": "14.3 MB",
        "Sequence Accuracy (Python Captcha)": "86.20%",
        "CER (Python Captcha)": "2.53%",
        "Link": "https://huggingface.co/Graf-J/captcha-crnn-finetuned",
    }
    return {
        "Graf-J/captcha-conv-transformer-finetuned": conv_transformer,
        "Graf-J/captcha-crnn-finetuned": crnn,
    }

MODELS_FINETUNED = load_finetuned_models()
52
+
53
@st.cache_resource
def load_all_models_hammer_stats():
    """Return metadata for base + finetuned models with hammer888 metrics (Section 2).

    Cached once per process via st.cache_resource.
    """
    conv_transformer_base = {
        "Architecture": "Convolutional Transformer",
        "Training Data": "hammer888/captcha-data",
        "Parameters": "12,279,551",
        "Model Size": "51.7 MB",
        "Sequence Accuracy (hammer888)": "97.38%",
        "CER (hammer888)": "0.57%",
        "Link": "https://huggingface.co/Graf-J/captcha-conv-transformer-base",
    }
    crnn_base = {
        "Architecture": "CRNN",
        "Training Data": "hammer888/captcha-data",
        "Parameters": "3,570,943",
        "Model Size": "14.3 MB",
        "Sequence Accuracy (hammer888)": "96.81%",
        "CER (hammer888)": "0.70%",
        "Link": "https://huggingface.co/Graf-J/captcha-crnn-base",
    }
    conv_transformer_finetuned = {
        "Architecture": "Convolutional Transformer",
        "Training Data": "hammer888/captcha-data + Python Captcha",
        "Parameters": "12,279,551",
        "Model Size": "51.7 MB",
        "Sequence Accuracy (hammer888)": "95.36%",
        "CER (hammer888)": "1.03%",
        "Link": "https://huggingface.co/Graf-J/captcha-conv-transformer-finetuned",
    }
    crnn_finetuned = {
        "Architecture": "CRNN",
        "Training Data": "hammer888/captcha-data + Python Captcha",
        "Parameters": "3,570,943",
        "Model Size": "14.3 MB",
        "Sequence Accuracy (hammer888)": "92.98%",
        "CER (hammer888)": "1.59%",
        "Link": "https://huggingface.co/Graf-J/captcha-crnn-finetuned",
    }
    return {
        "Graf-J/captcha-conv-transformer-base": conv_transformer_base,
        "Graf-J/captcha-crnn-base": crnn_base,
        "Graf-J/captcha-conv-transformer-finetuned": conv_transformer_finetuned,
        "Graf-J/captcha-crnn-finetuned": crnn_finetuned,
    }

ALL_MODELS = load_all_models_hammer_stats()
96
+
97
@st.cache_resource
def get_model_pipeline(model_id):
    """Build (and cache) a captcha-recognition pipeline for the given HF model id."""
    return pipeline(
        task="captcha-recognition",
        model=model_id,
        trust_remote_code=True,
    )
100
+
101
@st.cache_resource
def get_custom_model(model_id):
    """Load (and cache) the (processor, model) pair for a custom HF model id.

    The model is switched to eval mode for inference.
    """
    model = AutoModel.from_pretrained(model_id, trust_remote_code=True)
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model.eval()
    return processor, model
107
+
108
+
109
def predict(image, model_id):
    """Run captcha recognition on a PIL image with the given model.

    Returns (prediction, confidence) where confidence is the mean over the
    sequence of the per-step maximum softmax probabilities — a simplified
    score, since exact per-character confidence would require CTC alignment.

    Raises ValueError when model_id is not registered in ALL_MODELS.
    """
    if ALL_MODELS.get(model_id) is None:
        raise ValueError("Model not found")

    processor, model = get_custom_model(model_id)
    batch = processor(images=image)
    with torch.no_grad():
        logits = model(batch["pixel_values"]).logits

    # CTC decode to a string
    prediction = processor.batch_decode(logits)[0]

    # Mean of per-timestep max probabilities as the confidence score.
    step_max = F.softmax(logits, dim=-1).max(dim=-1).values
    confidence = step_max[0].mean().item()
    return prediction, confidence
130
+
131
def apply_transformations(img: Image.Image, rotation: float, alpha: float, seed: Optional[int] = None) -> Image.Image:
    """Apply elastic distortion and rotation to a PIL image using torchvision.

    rotation: degrees to rotate (0 = no rotation).
    alpha: elastic-distortion strength (0 disables distortion entirely).
    seed: optional seed for reproducible distortion.
    """
    result = img

    if alpha > 0:
        if seed is not None:
            # NOTE: seeds torch's *global* RNG — ElasticTransform takes no generator arg.
            torch.manual_seed(seed)

        # ElasticTransform expects a tensor; fill=255 paints displaced borders white.
        elastic = transforms.ElasticTransform(alpha=float(alpha), sigma=9.0, fill=255)
        as_tensor = transforms.ToTensor()(result)
        # ToPILImage expects C x H x W in [0, 1]; fill values are clamped on conversion.
        result = transforms.ToPILImage()(elastic(as_tensor))

    if rotation != 0:
        result = F_vision.rotate(result, float(rotation), fill=255)

    if alpha > 0:
        # Trim distortion artifacts at the left/right edges, proportional to alpha.
        margin = int(alpha / 35)
        width, height = result.size
        # Crop box is (left, upper, right, lower).
        result = result.crop((margin, 0, width - margin, height))

    return result
158
+
159
+
160
st.header("1. CAPTCHA Generation & Inference")

col1, col2 = st.columns([1, 2])

with col1:
    selected_model_1 = st.selectbox("Select Model", list(MODELS_FINETUNED.keys()), key="model_sec1")

    # Render the selected model's metrics as a two-column table.
    st.markdown("**Model Statistics:**")
    stats = {key: val for key, val in MODELS_FINETUNED[selected_model_1].items() if key != "type"}
    st.table({
        "Metric": list(stats.keys()),
        "Value": list(stats.values()),
    })
173
+
174
with col2:
    st.subheader("Generate CAPTCHA")

    # Session-state defaults: cached base image, the text it was rendered
    # from, and the seed used for reproducible elastic distortion.
    if "base_captcha_image" not in st.session_state:
        st.session_state.base_captcha_image = None
    if "last_captcha_text" not in st.session_state:
        st.session_state.last_captcha_text = ""
    if "distortion_seed" not in st.session_state:
        st.session_state.distortion_seed = torch.randint(0, 1000000, (1,)).item()

    input_col, btn_col = st.columns([3, 1])
    with input_col:
        captcha_val = st_keyup("Enter text (1-8 alphanumeric chars)", value="aZ93eiL", debounce=300)
        captcha_text: str = str(captcha_val) if captcha_val is not None else ""
    with btn_col:
        # Blank writes push the button down to align with the text input.
        st.write("")
        st.write("")
        regen_btn = st.button("🔄 Regenerate Image")

    slider_col1, slider_col2 = st.columns(2)
    with slider_col1:
        rotation = st.slider("Rotation (-15 to 15)", -15, 15, 0)
    with slider_col2:
        distortion = st.slider("Distortion Alpha (0 to 100)", 0, 100, 0)

    if not captcha_text.isalnum():
        st.error("Text must be alphanumeric!")
    elif not (1 <= len(captcha_text) <= 8):
        st.error("Text must be between 1 and 8 characters!")
    else:
        try:
            # Regenerate the base image when the text changed or on explicit request.
            text_changed = st.session_state.last_captcha_text != captcha_text
            if st.session_state.base_captcha_image is None or text_changed or regen_btn:
                generator = ImageCaptcha(fonts=["Nunito.ttf"])
                st.session_state.base_captcha_image = generator.generate_image(captcha_text)
                st.session_state.last_captcha_text = captcha_text
                st.session_state.distortion_seed = torch.randint(0, 1000000, (1,)).item()

            base_img = st.session_state.base_captcha_image

            # Apply the user's rotation/distortion settings, then predict.
            transformed_img = apply_transformations(base_img, rotation, distortion, st.session_state.distortion_seed)
            pred_text, conf = predict(transformed_img, selected_model_1)

            # Side-by-side: input image vs prediction.
            res_col1, res_col2 = st.columns(2)
            with res_col1:
                st.image(transformed_img, caption=f"Original: '{captcha_text}'", use_container_width=True)

            with res_col2:
                # Color each predicted char green/red by positional match with the input.
                def char_at(s: str, idx: int) -> str:
                    return s[idx] if idx < len(s) else ""

                spans = []
                for i, p_char in enumerate(pred_text):
                    color = "green" if p_char == char_at(str(captcha_text), i) else "red"
                    spans.append(f"<span style='color: {color};'>{p_char}</span>")
                colored_pred = "".join(spans)

                st.markdown(f"<h3 style='text-align: center;'>Model Prediction:</h3>", unsafe_allow_html=True)
                st.markdown(f"<h1 style='text-align: center;'><b>{colored_pred}</b></h1>", unsafe_allow_html=True)
                if conf is not None:
                    st.markdown(f"<p style='text-align: center; color: gray;'>Avg Confidence: {conf:.2%}</p>", unsafe_allow_html=True)

        except Exception as e:
            st.error(f"Error during prediction: {e}")
244
+
245
st.divider()

st.header("2. Upload & Test")

# NOTE(review): mid-file imports — kept in place to preserve the original
# module layout, but these belong at the top of the file (PEP 8).
import os
import glob

# Collect every example image from the images directory.
image_files = [*glob.glob("images/*.jpg"), *glob.glob("images/*.png")]
254
+
255
col_sec2_1, col_sec2_2 = st.columns([1, 2])

with col_sec2_1:
    selected_model_2 = st.selectbox("Select Model", list(ALL_MODELS.keys()), key="model_sec2")

    # Metrics table for the selected model.
    st.markdown("**Model Statistics:**")
    stats_2 = {key: val for key, val in ALL_MODELS[selected_model_2].items() if key != "type"}
    st.table({
        "Metric": list(stats_2.keys()),
        "Value": list(stats_2.values()),
    })

    with st.expander("Show Example Images"):
        st.markdown("Drag and Drop one of these images into the uploader above!")

        # Example images in a scrollable 3-column grid.
        with st.container(height=400):
            cols = st.columns(3)
            for idx, img_path in enumerate(image_files):
                with cols[idx % 3]:
                    st.image(img_path, use_container_width=True)
276
+
277
with col_sec2_2:
    st.subheader("Upload an image")
    uploaded_file = st.file_uploader("Choose an image file", type=["png", "jpg", "jpeg"], key="test_uploader")

    image_to_predict = None

    if uploaded_file is not None:
        image_to_predict = Image.open(uploaded_file).convert("RGB")
        try:
            pred_text, conf = predict(image_to_predict, selected_model_2)

            # Treat the file name (without extension) as ground truth when it
            # looks like a captcha label: alphanumeric, 1-10 characters.
            ground_truth = None
            if uploaded_file is not None:
                base_name = os.path.splitext(uploaded_file.name)[0]
                if isinstance(base_name, str) and base_name.isalnum() and 1 <= len(base_name) <= 10:
                    ground_truth = base_name

            # Side-by-side: uploaded image vs prediction.
            res2_col1, res2_col2 = st.columns(2)
            with res2_col1:
                st.image(image_to_predict, caption="Uploaded Image", use_container_width=True)

            with res2_col2:
                if ground_truth:
                    # Color each predicted char by positional match with the ground truth.
                    def char_at(s: str, idx: int) -> str:
                        return s[idx] if idx < len(s) else ""

                    pieces = []
                    for i, p_char in enumerate(pred_text):
                        color = "green" if p_char == char_at(ground_truth, i) else "red"
                        pieces.append(f"<span style='color: {color};'>{p_char}</span>")
                    colored_pred = "".join(pieces)

                    st.markdown(f"<h3 style='text-align: center;'>Model Prediction:</h3>", unsafe_allow_html=True)
                    st.markdown(f"<h1 style='text-align: center;'><b>{colored_pred}</b></h1>", unsafe_allow_html=True)
                else:
                    st.markdown(f"<h3 style='text-align: center;'>Model Prediction:</h3>", unsafe_allow_html=True)
                    st.markdown(f"<h1 style='text-align: center;'><b>{pred_text}</b></h1>", unsafe_allow_html=True)

                if conf is not None:
                    st.markdown(f"<p style='text-align: center; color: gray;'>Avg Confidence: {conf:.2%}</p>", unsafe_allow_html=True)

        except Exception as e:
            st.error(f"Error during prediction: {e}")
images/3eplzv.jpg ADDED
images/46CN5W.jpg ADDED
images/5820.jpg ADDED
images/6521.jpg ADDED
images/67qas.jpg ADDED
images/75ke.jpg ADDED
images/8JKM.jpg ADDED
images/8jpwt0.jpg ADDED
images/B1QAZ6.jpg ADDED
images/CCX8.jpg ADDED
images/EPOD.jpg ADDED
images/ER6Y.jpg ADDED
images/EWSP.jpg ADDED
images/GIOGp.jpg ADDED
images/HCDS.jpg ADDED
images/JBWkEs.jpg ADDED
images/KKh8Q.jpg ADDED
images/MFMH.jpg ADDED
images/NJSEX.jpg ADDED
images/R6AB.jpg ADDED
images/TVHF.jpg ADDED
images/Vb4cG.jpg ADDED
images/XaNqQx.jpg ADDED
images/YULM.jpg ADDED
images/abfsh.jpg ADDED
images/b6yc.jpg ADDED
images/bCWaLR.jpg ADDED
images/d3no.jpg ADDED
images/iq1sZo.jpg ADDED
images/kJtOfk.jpg ADDED
requirements.txt CHANGED
@@ -1,3 +1,210 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile pyproject.toml --output-file requirements.txt
3
+ altair==6.0.0
4
+ # via streamlit
5
+ annotated-doc==0.0.4
6
+ # via typer
7
+ anyio==4.12.1
8
+ # via httpx
9
+ attrs==25.4.0
10
+ # via
11
+ # jsonschema
12
+ # referencing
13
+ blinker==1.9.0
14
+ # via streamlit
15
+ cachetools==6.2.6
16
+ # via streamlit
17
+ captcha==0.7.1
18
+ # via captcha-website (pyproject.toml)
19
+ certifi==2026.2.25
20
+ # via
21
+ # httpcore
22
+ # httpx
23
+ # requests
24
+ charset-normalizer==3.4.4
25
+ # via requests
26
+ click==8.3.1
27
+ # via
28
+ # streamlit
29
+ # typer
30
+ colorama==0.4.6
31
+ # via
32
+ # click
33
+ # tqdm
34
+ contourpy==1.3.3
35
+ # via matplotlib
36
+ cycler==0.12.1
37
+ # via matplotlib
38
+ filelock==3.24.3
39
+ # via
40
+ # huggingface-hub
41
+ # torch
42
+ fonttools==4.61.1
43
+ # via matplotlib
44
+ fsspec==2026.2.0
45
+ # via
46
+ # huggingface-hub
47
+ # torch
48
+ gitdb==4.0.12
49
+ # via gitpython
50
+ gitpython==3.1.46
51
+ # via streamlit
52
+ h11==0.16.0
53
+ # via httpcore
54
+ hf-xet==1.3.2
55
+ # via huggingface-hub
56
+ httpcore==1.0.9
57
+ # via httpx
58
+ httpx==0.28.1
59
+ # via huggingface-hub
60
+ huggingface-hub==1.5.0
61
+ # via
62
+ # tokenizers
63
+ # transformers
64
+ idna==3.11
65
+ # via
66
+ # anyio
67
+ # httpx
68
+ # requests
69
+ jinja2==3.1.6
70
+ # via
71
+ # altair
72
+ # pydeck
73
+ # streamlit-keyup
74
+ # torch
75
+ jsonschema==4.26.0
76
+ # via altair
77
+ jsonschema-specifications==2025.9.1
78
+ # via jsonschema
79
+ kiwisolver==1.4.9
80
+ # via matplotlib
81
+ markdown-it-py==4.0.0
82
+ # via rich
83
+ markupsafe==3.0.3
84
+ # via jinja2
85
+ matplotlib==3.10.8
86
+ # via captcha-website (pyproject.toml)
87
+ mdurl==0.1.2
88
+ # via markdown-it-py
89
+ mpmath==1.3.0
90
+ # via sympy
91
+ narwhals==2.17.0
92
+ # via altair
93
+ networkx==3.6.1
94
+ # via torch
95
+ numpy==2.4.2
96
+ # via
97
+ # captcha-website (pyproject.toml)
98
+ # contourpy
99
+ # matplotlib
100
+ # pandas
101
+ # pydeck
102
+ # streamlit
103
+ # torchvision
104
+ # transformers
105
+ packaging==26.0
106
+ # via
107
+ # altair
108
+ # huggingface-hub
109
+ # matplotlib
110
+ # streamlit
111
+ # transformers
112
+ pandas==2.3.3
113
+ # via streamlit
114
+ pillow==12.1.1
115
+ # via
116
+ # captcha-website (pyproject.toml)
117
+ # captcha
118
+ # matplotlib
119
+ # streamlit
120
+ # torchvision
121
+ protobuf==6.33.5
122
+ # via streamlit
123
+ pyarrow==23.0.1
124
+ # via streamlit
125
+ pydeck==0.9.1
126
+ # via streamlit
127
+ pygments==2.19.2
128
+ # via rich
129
+ pyparsing==3.3.2
130
+ # via matplotlib
131
+ python-dateutil==2.9.0.post0
132
+ # via
133
+ # matplotlib
134
+ # pandas
135
+ pytz==2025.2
136
+ # via pandas
137
+ pyyaml==6.0.3
138
+ # via
139
+ # huggingface-hub
140
+ # transformers
141
+ referencing==0.37.0
142
+ # via
143
+ # jsonschema
144
+ # jsonschema-specifications
145
+ regex==2026.2.28
146
+ # via transformers
147
+ requests==2.32.5
148
+ # via streamlit
149
+ rich==14.3.3
150
+ # via typer
151
+ rpds-py==0.30.0
152
+ # via
153
+ # jsonschema
154
+ # referencing
155
+ safetensors==0.7.0
156
+ # via transformers
157
+ setuptools==82.0.0
158
+ # via torch
159
+ shellingham==1.5.4
160
+ # via typer
161
+ six==1.17.0
162
+ # via python-dateutil
163
+ smmap==5.0.2
164
+ # via gitdb
165
+ streamlit==1.54.0
166
+ # via
167
+ # captcha-website (pyproject.toml)
168
+ # streamlit-keyup
169
+ streamlit-keyup==0.3.0
170
+ # via captcha-website (pyproject.toml)
171
+ sympy==1.14.0
172
+ # via torch
173
+ tenacity==9.1.4
174
+ # via streamlit
175
+ tokenizers==0.22.2
176
+ # via transformers
177
+ toml==0.10.2
178
+ # via streamlit
179
+ torch==2.10.0
180
+ # via
181
+ # captcha-website (pyproject.toml)
182
+ # torchvision
183
+ torchvision==0.25.0
184
+ # via captcha-website (pyproject.toml)
185
+ tornado==6.5.4
186
+ # via streamlit
187
+ tqdm==4.67.3
188
+ # via
189
+ # huggingface-hub
190
+ # transformers
191
+ transformers==5.2.0
192
+ # via captcha-website (pyproject.toml)
193
+ typer==0.24.1
194
+ # via
195
+ # huggingface-hub
196
+ # typer-slim
197
+ typer-slim==0.24.0
198
+ # via transformers
199
+ typing-extensions==4.15.0
200
+ # via
201
+ # altair
202
+ # huggingface-hub
203
+ # streamlit
204
+ # torch
205
+ tzdata==2025.3
206
+ # via pandas
207
+ urllib3==2.6.3
208
+ # via requests
209
+ watchdog==6.0.0
210
+ # via streamlit