1TSnakers committed on
Commit
f73d12e
·
verified ·
1 Parent(s): 3a93188

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +1716 -0
app.py ADDED
@@ -0,0 +1,1716 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from typing import List, Optional, Dict, Any, Union
3
+ from fastapi import FastAPI, HTTPException, Request, Query, Path as FastApiPath
4
+ from fastapi.responses import FileResponse, HTMLResponse, JSONResponse # Import FileResponse and HTMLResponse
5
+ from pydantic import BaseModel, Field, HttpUrl
6
+ import requests
7
+ import requests_cache
8
+ from datetime import datetime, timezone, timedelta
9
+ import uvicorn
10
+ from bs4 import BeautifulSoup
11
+ from urllib.parse import urlparse, urljoin
12
+ import re
13
+ import os
14
+ import random
15
+ import time
16
+ from collections import deque
17
+ from dotenv import load_dotenv
18
+
19
+ load_dotenv()
20
+
21
# --- Configuration ---

CODE_VERSION = os.getenv("CODE_VERSION", "Unknown_Release")
OLLAMA_COM_BASE_URL = os.getenv("OLLAMA_COM_BASE_URL", "https://ollama.com")
CURRENT_BASE_URL = os.getenv("CURRENT_BASE_URL", "https://example.com")
STATIC_WEBSITE = os.getenv("STATIC_WEBSITE", "False") == "True" # RECOMMENDED "FALSE"
# BUG FIX: os.getenv returns a *string* whenever the variable is set, so the
# original `CACHE_EXPIRE_AFTER * 3600` would build a 21600-character string
# (e.g. "666...6") instead of a number of seconds. Coerce to int up front.
CACHE_EXPIRE_AFTER = int(os.getenv("CACHE_EXPIRE_AFTER", 6)) # HOURS

# Cache settings: expires after CACHE_EXPIRE_AFTER hours (default 6 = 21600s).
# Use a simple in-memory cache for demonstration/testing, persistent 'sqlite' is also an option
cachetime = CACHE_EXPIRE_AFTER * 3600
requests_cache.install_cache('ollama_com_cache', backend='memory', expire_after=cachetime)
# Use a CachedSession for all requests to ollama.com
cached_session = requests_cache.CachedSession()
38
+
39
+ # --- ROOT HTML ---
40
+ dummy_html_content = """
41
+ <!DOCTYPE html>
42
+ <html lang="en">
43
+ <head>
44
+ <meta charset="UTF-8">
45
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
46
+ <title>Ollama API Proxy</title>
47
+ <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap" rel="stylesheet">
48
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.7.2/css/all.min.css">
49
+ <style>
50
+ :root {
51
+ --bg-primary: #0f172a;
52
+ --bg-secondary: #1e293b;
53
+ --bg-third: #111927;
54
+ --accent-primary: #38bdf8;
55
+ --accent-secondary: #7dd3fc;
56
+ --text-primary: #f8fafc;
57
+ --text-secondary: #94a3b8;
58
+ --border-color: #334155;
59
+ --gradient: linear-gradient(135deg, #38bdf8 0%, #7dd3fc 100%);
60
+ }
61
+
62
+ * {
63
+ margin: 0;
64
+ padding: 0;
65
+ box-sizing: border-box;
66
+ }
67
+
68
+ body {
69
+ font-family: 'Inter', sans-serif;
70
+ background-color: var(--bg-primary);
71
+ color: var(--text-primary);
72
+ line-height: 1.6;
73
+ }
74
+
75
+ .container {
76
+ max-width: 1200px;
77
+ margin: 0 auto;
78
+ padding: 2rem;
79
+ }
80
+
81
+ .header {
82
+ text-align: center;
83
+ padding: 4rem 0;
84
+ border-bottom: 1px solid var(--border-color);
85
+ background: var(--bg-secondary);
86
+ margin-bottom: 3rem;
87
+ transition: 0.7s ease-in-out;
88
+ }
89
+
90
+ .title {
91
+ font-size: 2.5rem;
92
+ margin-bottom: 1rem;
93
+ background: var(--gradient);
94
+ -webkit-background-clip: text;
95
+ -webkit-text-fill-color: transparent;
96
+ font-weight: 700;
97
+ }
98
+
99
+ .subtitle {
100
+ color: var(--text-secondary);
101
+ font-size: 1.2rem;
102
+ margin-bottom: 2rem;
103
+ }
104
+
105
+ .nav-links {
106
+ display: flex;
107
+ gap: 1.5rem;
108
+ justify-content: center;
109
+ margin-bottom: 2rem;
110
+ }
111
+
112
+ .nav-link {
113
+ color: var(--accent-primary);
114
+ text-decoration: none;
115
+ font-weight: 500;
116
+ transition: color 0.3s ease;
117
+ padding: 0.5rem 1rem;
118
+ border-radius: 6px;
119
+ transition: 0.6s ease-in-out;
120
+ }
121
+
122
+ .nav-link:hover {
123
+ color: var(--accent-secondary);
124
+ transition: 0.3s ease-in-out;
125
+ background: rgba(56, 189, 248, 0.1);
126
+ }
127
+
128
+ .section {
129
+ margin-bottom: 3rem;
130
+ background: var(--bg-secondary);
131
+ border-radius: 12px;
132
+ padding: 2rem;
133
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
134
+ }
135
+
136
+ .section-title {
137
+ font-size: 1.5rem;
138
+ margin-bottom: 1.5rem;
139
+ color: var(--accent-primary);
140
+ display: flex;
141
+ align-items: center;
142
+ gap: 0.5rem;
143
+ }
144
+
145
+ .endpoint-grid {
146
+ display: grid;
147
+ gap: 1.5rem;
148
+ grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
149
+ }
150
+
151
+ .endpoint-card {
152
+ background: var(--bg-primary);
153
+ border: 1px solid var(--border-color);
154
+ border-radius: 8px;
155
+ padding: 1.5rem;
156
+ transition: transform 0.2s ease;
157
+ }
158
+
159
+ .endpoint-card:hover {
160
+ transform: translateY(-3px);
161
+ }
162
+
163
+ .endpoint-title {
164
+ font-family: 'JetBrains Mono', monospace;
165
+ color: var(--accent-primary);
166
+ margin-bottom: 0.5rem;
167
+ font-size: 0.9rem;
168
+ }
169
+
170
+ .endpoint-description {
171
+ color: var(--text-secondary);
172
+ font-size: 0.9rem;
173
+ }
174
+
175
+ .code-snippet {
176
+ background: var(--bg-primary);
177
+ border: 1px solid var(--border-color);
178
+ border-radius: 6px;
179
+ padding: 1rem;
180
+ font-family: 'JetBrains Mono', monospace;
181
+ font-size: 0.9rem;
182
+ position: relative;
183
+ margin: 1rem 0;
184
+ }
185
+
186
+ .copy-button {
187
+ position: absolute;
188
+ top: 0.5rem;
189
+ right: 0.5rem;
190
+ background: var(--bg-secondary);
191
+ border: none;
192
+ color: var(--text-secondary);
193
+ padding: 0.25rem 0.5rem;
194
+ border-radius: 4px;
195
+ cursor: pointer;
196
+ transition: all 0.2s ease;
197
+ }
198
+
199
+ .copy-button:hover {
200
+ color: var(--accent-primary);
201
+ background: var(--bg-primary);
202
+ }
203
+
204
+ .footer {
205
+ text-align: center;
206
+ padding: 2rem 0;
207
+ border-top: 1px solid var(--border-color);
208
+ margin-top: 3rem;
209
+ color: var(--text-secondary);
210
+ }
211
+
212
+ .social-links {
213
+ display: flex;
214
+ gap: 1rem;
215
+ justify-content: center;
216
+ margin-top: 1rem;
217
+ }
218
+
219
+ .social-link {
220
+ color: var(--text-secondary);
221
+ transition: color 0.3s ease;
222
+ }
223
+
224
+ .social-link:hover {
225
+ color: var(--accent-primary);
226
+ }
227
+
228
+ @media (max-width: 768px) {
229
+ .container {
230
+ padding: 1rem;
231
+ }
232
+
233
+ .title {
234
+ font-size: 2rem;
235
+ }
236
+
237
+ .endpoint-grid {
238
+ grid-template-columns: 1fr;
239
+ }
240
+ }
241
+
242
+ .badge {
243
+ display: inline-block;
244
+ padding: 0.25rem 0.5rem;
245
+ border-radius: 4px;
246
+ font-size: 0.75rem;
247
+ font-weight: 500;
248
+ background: rgba(56, 189, 248, 0.1);
249
+ color: var(--accent-primary);
250
+ margin-left: 0.5rem;
251
+ }
252
+
253
+ .animate-fade-in {
254
+ animation: fadeIn 0.5s ease-in;
255
+ }
256
+
257
+ @keyframes fadeIn {
258
+ from { opacity: 0; transform: translateY(10px); }
259
+ to { opacity: 1; transform: translateY(0); }
260
+ }
261
+
262
+ .apps-grid {
263
+ display: grid;
264
+ gap: 2rem;
265
+ grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
266
+ }
267
+
268
+ .app-card {
269
+ background: var(--bg-primary);
270
+ border: 1px solid var(--border-color);
271
+ border-radius: 12px;
272
+ padding: 1.5rem;
273
+ text-align: center;
274
+ transition: all 0.3s ease;
275
+ cursor: pointer;
276
+ }
277
+
278
+ .app-card:hover {
279
+ border-color: var(--accent-primary);
280
+ }
281
+
282
+ .app-icon {
283
+ width: 80px;
284
+ height: 80px;
285
+ object-fit: contain;
286
+ margin: 0 auto 1rem;
287
+ border-radius: 16px;
288
+ filter: grayscale(1);
289
+ transition: filter 0.3s ease;
290
+ }
291
+
292
+ .app-card:hover .app-icon {
293
+ filter: grayscale(0);
294
+ }
295
+
296
+ .app-name {
297
+ font-weight: 600;
298
+ margin-bottom: 0.5rem;
299
+ }
300
+
301
+ .app-description {
302
+ color: var(--text-secondary);
303
+ font-size: 0.9rem;
304
+ margin-bottom: 0.5rem;
305
+ }
306
+
307
+ .app-version {
308
+ font-size: 0.75rem;
309
+ color: var(--accent-primary);
310
+ background: rgba(56, 189, 248, 0.1);
311
+ padding: 0.25rem 0.5rem;
312
+ border-radius: 4px;
313
+ display: inline-block;
314
+ }
315
+
316
+ .apps-grid {
317
+ display: grid;
318
+ gap: 2rem;
319
+ grid-template-columns: repeat(4, 1fr);
320
+ max-height: 400px;
321
+ overflow-y: auto;
322
+ }
323
+
324
+ @media (max-width: 1200px) {
325
+ .apps-grid {
326
+ grid-template-columns: repeat(3, 1fr);
327
+ }
328
+ }
329
+
330
+ @media (max-width: 768px) {
331
+ .apps-grid {
332
+ grid-template-columns: repeat(2, 1fr);
333
+ }
334
+ }
335
+
336
+ @media (max-width: 480px) {
337
+ .apps-grid {
338
+ grid-template-columns: 1fr;
339
+ }
340
+ }
341
+
342
+ .submit-form {
343
+ background: var(--bg-primary);
344
+ padding: 2rem;
345
+ border-radius: 12px;
346
+ margin-top: 2rem;
347
+ }
348
+
349
+ .form-group {
350
+ margin-bottom: 1rem;
351
+ }
352
+
353
+ .form-input {
354
+ width: 100%;
355
+ padding: 0.8rem;
356
+ background: var(--bg-secondary);
357
+ border: 1px solid var(--border-color);
358
+ color: var(--text-primary);
359
+ border-radius: 6px;
360
+ margin-top: 0.5rem;
361
+ }
362
+
363
+ .submit-button {
364
+ background: var(--bg-primary);
365
+ color: var(--text-primary);
366
+ border: none;
367
+ padding: 0.8rem 1.5rem;
368
+ border-radius: 6px;
369
+ cursor: pointer;
370
+ transition: opacity 0.3s ease;
371
+ width: 100%;
372
+ }
373
+
374
+ .submit-button:hover {
375
+ opacity: 0.9;
376
+ }
377
+
378
+ .status-message {
379
+ margin-top: 1rem;
380
+ padding: 1rem;
381
+ border-radius: 6px;
382
+ display: none;
383
+ }
384
+
385
+ .success {
386
+ background: rgba(56, 189, 248, 0.1);
387
+ border: 1px solid var(--accent-primary);
388
+ }
389
+
390
+ .error {
391
+ background: rgba(248, 56, 56, 0.1);
392
+ border: 1px solid #f87171;
393
+ }
394
+
395
+ .modal {
396
+ display: none;
397
+ position: fixed;
398
+ top: 0;
399
+ left: 0;
400
+ width: 100%;
401
+ height: 100%;
402
+ background: rgba(15, 23, 42, 0.95);
403
+ z-index: 1000;
404
+ justify-content: center;
405
+ align-items: center;
406
+ animation: fadeIn 0.3s ease-out;
407
+ }
408
+
409
+ .modal-content {
410
+ background: var(--bg-secondary);
411
+ padding: 2rem;
412
+ border-radius: 12px;
413
+ width: 90%;
414
+ max-width: 500px;
415
+ position: relative;
416
+ }
417
+
418
+ .close-modal {
419
+ position: absolute;
420
+ top: 1rem;
421
+ right: 1rem;
422
+ color: var(--text-secondary);
423
+ font-size: 1.5rem;
424
+ cursor: pointer;
425
+ transition: color 0.3s ease;
426
+ }
427
+
428
+ .close-modal:hover {
429
+ color: var(--accent-primary);
430
+ }
431
+
432
+ @keyframes fadeIn {
433
+ from { opacity: 0; }
434
+ to { opacity: 1; }
435
+ }
436
+ </style>
437
+ </head>
438
+ <body>
439
+ <header class="header animate-fade-in">
440
+ <div class="container">
441
+ <h1 class="title">Ollama API Proxy</h1>
442
+ <p class="subtitle">An API that fetches, parses, and caches data from ollama.com.</p>
443
+ <nav class="nav-links">
444
+ <a href="/docs" class="nav-link"><i class="fas fa-code"></i> API Docs</a>
445
+ <a href="/redoc" class="nav-link"><i class="fas fa-book-open"></i> ReDoc</a>
446
+ <a href="https://ollama.com" target="_blank" class="nav-link"><i class="fas fa-external-link-alt"></i> Ollama.com</a>
447
+ </nav>
448
+ </div>
449
+ </header>
450
+
451
+ <main class="container">
452
+ <section class="section animate-fade-in">
453
+ <h2 class="section-title"><i class="fas fa-rocket"></i> Getting Started</h2>
454
+ <div class="code-snippet">
455
+ <button class="copy-button" onclick="navigator.clipboard.writeText('http://localhost:5115/docs')">
456
+ <i class="far fa-copy"></i>
457
+ </button>
458
+ # Explore the API documentation
459
+ $ open http://localhost:5115/docs
460
+ </div>
461
+ </section>
462
+
463
+ <section class="section animate-fade-in">
464
+ <h2 class="section-title"><i class="fas fa-plug"></i> Example Endpoints</h2>
465
+ <div class="endpoint-grid">
466
+ <div class="endpoint-card">
467
+ <div class="endpoint-title">GET /library?o=popular <span class="badge">Default</span></div>
468
+ <p class="endpoint-description">Get popular models from official library</p>
469
+ </div>
470
+
471
+ <div class="endpoint-card">
472
+ <div class="endpoint-title">GET /jmorganca/llama3 <span class="badge">User Model</span></div>
473
+ <p class="endpoint-description">Get details for specific user model</p>
474
+ </div>
475
+
476
+ <div class="endpoint-card">
477
+ <div class="endpoint-title">GET /search?q=mistral <span class="badge">Search</span></div>
478
+ <p class="endpoint-description">Global model search functionality</p>
479
+ </div>
480
+
481
+ <div class="endpoint-card">
482
+ <div class="endpoint-title">GET /.../blobs/model <span class="badge">Blobs</span></div>
483
+ <p class="endpoint-description">Access raw model artifacts</p>
484
+ </div>
485
+ </div>
486
+ </section>
487
+
488
+
489
+ <section class="section animate-fade-in">
490
+ <h2 class="section-title"><i class="fas fa-microchip"></i> System Status</h2>
491
+ <div class="endpoint-grid">
492
+ <div class="endpoint-card">
493
+ <div class="endpoint-title">Cache Status <span class="badge">Live</span></div>
494
+ <p class="endpoint-description">6-hour intelligent caching</p>
495
+ </div>
496
+ <div class="endpoint-card">
497
+ <div class="endpoint-title">Uptime <span class="badge">99.9%</span></div>
498
+ <p class="endpoint-description">High availability service</p>
499
+ </div>
500
+ </div>
501
+ </section>
502
+
503
+ <section class="section animate-fade-in">
504
+ <h2 class="section-title"><i class="fas fa-rocket"></i> Powered Apps</h2>
505
+ <div class="apps-grid" id="apps-container"></div>
506
+ <button class="submit-button" id="openModal" style="margin-top: 1.5rem;">
507
+ <i class="fas fa-plus"></i> Submit Your App
508
+ </button>
509
+ </section>
510
+ </main>
511
+
512
+ <div id="submitModal" class="modal">
513
+ <div class="modal-content">
514
+ <span class="close-modal">&times;</span>
515
+ <h3 class="section-title"><i class="fas fa-plus-circle"></i> Submit Your App</h3>
516
+ <form id="appSubmitForm" onsubmit="return submitApp(event)">
517
+ <div class="form-group">
518
+ <label>App Name</label>
519
+ <input type="text" class="form-input" id="appName" required>
520
+ </div>
521
+ <div class="form-group">
522
+ <label>Website URL</label>
523
+ <input type="url" class="form-input" id="websiteUrl" required>
524
+ </div>
525
+ <button type="submit" class="submit-button">Submit Application</button>
526
+ </form>
527
+ <div id="statusMessage" class="status-message"></div>
528
+ </div>
529
+ </div>
530
+
531
+ <footer class="footer animate-fade-in">
532
+ <div class="container">
533
+ <div class="social-links" style="margin-bottom: 20px;">
534
+ <a href="https://github.com/Houloude9IOfficial/OllamaSearchAPI" class="social-link" target="_blank">
535
+ <i class="fab fa-github"></i>
536
+ </a>
537
+ <a href="https://houloude9.is-a.dev" class="social-link" target="_blank">
538
+ <i class="fa fa-globe"></i>
539
+ </a>
540
+ </div>
541
+ <span id="version" class="badge"></span>
542
+ <p>
543
+ Developed by
544
+ <a href="https://discord.com/users/575254127748317194" target="_blank" rel="noopener noreferrer"
545
+ style="text-decoration: none; font-weight: bold; color: inherit;"
546
+ onmouseover="this.style.textDecoration='underline'"
547
+ onmouseout="this.style.textDecoration='none'">
548
+ Blood Shot
549
+ </a>
550
+ </p>
551
+ <p>
552
+ Maintained by
553
+ <a href="https://discord.com/users/947432701160480828" target="_blank" rel="noopener noreferrer"
554
+ style="text-decoration: none; font-weight: bold; color: inherit;"
555
+ onmouseover="this.style.textDecoration='underline'"
556
+ onmouseout="this.style.textDecoration='none'">
557
+ Houloude9
558
+ </a>
559
+ </p>
560
+ <p>
561
+ Powered by
562
+ <a href="https://koyeb.com" target="_blank" rel="noopener noreferrer"
563
+ style="text-decoration: none; font-weight: bold; color: inherit;"
564
+ onmouseover="this.style.textDecoration='underline'"
565
+ onmouseout="this.style.textDecoration='none'">
566
+ Koyeb
567
+ </a>
568
+ </p>
569
+
570
+ </div>
571
+ </footer>
572
+
573
+ <script>
574
+ function setversion(version) {
575
+ if(!String(String(version).toLowerCase()).startsWith('v')) {
576
+ version = `v${version}`
577
+ }
578
+ document.getElementById('version').textContent = version
579
+ }
580
+ document.querySelectorAll('a[href^="#"]').forEach(anchor => {
581
+ anchor.addEventListener('click', function (e) {
582
+ e.preventDefault();
583
+ document.querySelector(this.getAttribute('href')).scrollIntoView({
584
+ behavior: 'smooth'
585
+ });
586
+ });
587
+ });
588
+
589
+ document.querySelectorAll('.copy-button').forEach(button => {
590
+ button.addEventListener('click', function() {
591
+ const snippet = this.parentElement.textContent.replace('Copy', '').trim();
592
+ navigator.clipboard.writeText(snippet);
593
+
594
+ const originalHTML = this.innerHTML;
595
+ this.innerHTML = '<i class="fas fa-check"></i> Copied!';
596
+
597
+ setTimeout(() => {
598
+ this.innerHTML = originalHTML;
599
+ }, 2000);
600
+ });
601
+ });
602
+ const modal = document.getElementById('submitModal');
603
+ const openBtn = document.getElementById('openModal');
604
+ const closeSpan = document.querySelector('.close-modal');
605
+
606
+ openBtn.onclick = () => modal.style.display = 'flex';
607
+ closeSpan.onclick = () => modal.style.display = 'none';
608
+
609
+ window.onclick = (event) => {
610
+ if (event.target === modal) {
611
+ modal.style.display = 'none';
612
+ }
613
+ }
614
+
615
+ async function submitApp(event) {
616
+ event.preventDefault();
617
+ const name = document.getElementById('appName').value;
618
+ const website = document.getElementById('websiteUrl').value;
619
+ const statusMessage = document.getElementById('statusMessage');
620
+
621
+ try {
622
+ new URL(website);
623
+ } catch {
624
+ statusMessage.textContent = "Please enter a valid URL";
625
+ statusMessage.className = "status-message error";
626
+ statusMessage.style.display = 'block';
627
+ return;
628
+ }
629
+
630
+ try {
631
+ const response = await fetch('https://nextuiserver.htdevs.workers.dev/ollamasearchapi/submit', {
632
+ method: 'POST',
633
+ headers: {
634
+ 'Content-Type': 'application/json',
635
+ },
636
+ body: JSON.stringify({ name, website }),
637
+ });
638
+
639
+ const data = await response.json();
640
+
641
+ if (response.ok) {
642
+ statusMessage.textContent = "App submitted successfully!";
643
+ statusMessage.className = "status-message success";
644
+ document.getElementById('appSubmitForm').reset();
645
+ setTimeout(() => {
646
+ modal.style.display = 'none';
647
+ statusMessage.style.display = 'none';
648
+ }, 2000);
649
+ } else {
650
+ statusMessage.textContent = data.error || "Submission failed";
651
+ statusMessage.className = "status-message error";
652
+ }
653
+ } catch (error) {
654
+ statusMessage.textContent = "Network error - please try again";
655
+ statusMessage.className = "status-message error";
656
+ }
657
+
658
+ statusMessage.style.display = 'block';
659
+ setTimeout(() => {
660
+ statusMessage.style.display = 'none';
661
+ }, 5000);
662
+ }
663
+
664
+ async function loadPoweredApps() {
665
+ try {
666
+ const response = await fetch('https://nextuiserver.htdevs.workers.dev/ollamasearchapi/getapps');
667
+ const data = await response.json();
668
+ const container = document.getElementById('apps-container');
669
+
670
+ container.innerHTML = '';
671
+
672
+ data.apps.forEach(app => {
673
+ const card = document.createElement('div');
674
+ card.className = 'app-card';
675
+ card.onclick = () => window.open(app.url, '_blank');
676
+ const fullversion = app.version ? `<div class="app-version">${app.version}</div>` : '';
677
+
678
+ card.innerHTML = `
679
+ <img src="${app.icon}" class="app-icon" alt="${app.name}">
680
+ <div class="app-name">${app.name}</div>
681
+ <div class="app-description">${app.description}</div>
682
+ ${fullversion}
683
+ `;
684
+
685
+ container.appendChild(card);
686
+ });
687
+ } catch (error) {
688
+ console.error('Error loading powered apps:', error);
689
+ }
690
+ }
691
+
692
+ window.addEventListener('DOMContentLoaded', loadPoweredApps);
693
+
694
+ setversion('VERSION_BEING_REPLACED')
695
+ </script>
696
+ </body>
697
+ </html>
698
+ """
699
+
700
# --- DO NOT CHANGE WITHOUT KNOWLEDGE ---

Static_Website = STATIC_WEBSITE
filename = 'index.html'
# NOTE: `format` shadows the builtin of the same name; kept because later
# code in this file may reference the module-level variable.
format = 'html'
# Unique temp-file name per process start (millisecond timestamp + format).
TEMPTEXT = f'{int(time.time() * 1000)}{format}'
FILETEMPTEXT = f'tempfile_{TEMPTEXT}.html'

# Static deployments reuse a fixed index.html instead of a fresh temp file.
if Static_Website:
    TEMPTEXT = 'index'
    FILETEMPTEXT = 'index.html'
711
# --- Pydantic Models ---

class CacheInfoMixin(BaseModel):
    """Mixin adding cache-provenance metadata to every API response model."""
    fetched_at: datetime = Field(description="Timestamp when the data was fetched (or revalidated from cache).")
    cached_at: Optional[datetime] = Field(None, description="Timestamp when the data was originally cached. None if fresh fetch.")
    cache_expires_at: Optional[datetime] = Field(None, description="Timestamp when the cache for this item is set to expire.")
    from_cache: bool = Field(description="Indicates if the response was served from cache.")

class FilterInfo(BaseModel):
    """Filters that were applied to a listing/search request."""
    capabilities: Optional[List[str]] = None

class ModelResultItem(BaseModel):
    """One model entry as it appears in a search or library listing."""
    source_url: HttpUrl
    namespace: str
    model_base_name: str
    name_full_model: str
    description: str
    pull_count_str: str        # human-readable count as scraped, e.g. "1.2M"
    pull_count: int            # numeric value parsed from pull_count_str
    tags_count: int
    last_updated_str: str      # relative date as scraped, e.g. "2 weeks ago"
    last_updated_iso: datetime # absolute timestamp parsed from last_updated_str
    capabilities: List[str] = []
    sizes: List[str] = []

class SearchResponse(CacheInfoMixin):
    """Response for the global model search endpoint."""
    query: Optional[str] = None
    sort_order: str
    filters: Optional[FilterInfo] = None
    results: List[ModelResultItem]

class ModelListByNamespaceResponse(CacheInfoMixin): # Renamed from LibraryListResponse
    """Response listing the models under a single namespace."""
    queried_namespace: str
    sort_order: str
    filters: Optional[FilterInfo] = None
    results: List[ModelResultItem]

class FileSummary(BaseModel):
    """Summary of one file/blob shown on a model page."""
    name: str
    blob_url: HttpUrl
    digest: Optional[str] = None
    size_str: str
    snippet: str
    updated_str: Optional[str] = None

class TagSummary(BaseModel):
    """A tag entry from the tag dropdown on a model page."""
    tag_part: str
    name_full_tag: str
    size_str: Optional[str] = None
    is_active: bool = False

class ModelPageResponse(CacheInfoMixin):
    """Full details scraped from a single model's page."""
    name_full_model: str
    namespace: str
    model_base_name: str
    active_tag_part: Optional[str] = None
    active_tag_full_name: Optional[str] = None
    source_url: HttpUrl
    summary: str
    pull_count_str: str
    pull_count: int
    last_updated_str: str
    last_updated_iso: datetime
    capabilities: List[str] = []
    sizes: List[str] = []
    readme_content: str
    tag_command: Optional[str] = None
    tag_files_summary: List[FileSummary] = []
    all_tags_dropdown_summary: List[TagSummary] = []
    all_tags_page_url: HttpUrl
    total_tags_count_from_link: Optional[int] = None

class TagDetailItem(BaseModel):
    """One row from a model's full tags page."""
    name_full_tag: str
    tag_part: str
    source_url: HttpUrl
    digest: str
    size_str: str
    size_bytes: int
    context_window_str: Optional[str] = None
    input_type: Optional[str] = None
    modified_str: str
    modified_iso: datetime
    is_default: bool = False

class AllTagsResponse(CacheInfoMixin):
    """Response listing every tag of one model."""
    name_full_model: str
    namespace: str
    model_base_name: str
    tags_page_url: HttpUrl
    tags: List[TagDetailItem]

class GGUFMetadata(BaseModel):
    """Metadata parsed from a GGUF header snippet on a blob page."""
    arch: Optional[str] = None
    parameters: Optional[str] = None
    quantization: Optional[str] = None
    class Config:
        # Preserve any extra GGUF key/value pairs beyond the known fields.
        extra = "allow"

class BlobDetailsResponse(CacheInfoMixin):
    """Details of a single blob (model file, license, template, ...)."""
    name_full_tag: str
    canonical_name: str
    source_url: HttpUrl
    digest: str
    size_str: str
    text_content: Optional[str] = None
    parsed_json_content: Optional[Union[Dict[str, Any], List[Any]]] = None
    gguf_metadata_snippet: Optional[str] = None
    parsed_gguf_metadata: Optional[GGUFMetadata] = None
    listing_updated_str: Optional[str] = None
    listing_updated_iso: Optional[datetime] = None
822
+
823
+
824
# --- FastAPI App ---
app = FastAPI(
    title="Ollama.com Library API Proxy",
    description="An API that fetches, parses, and caches data from ollama.com.",
    version=CODE_VERSION,  # version string sourced from the CODE_VERSION env var
)
830
+
831
# --- Helper Functions ---

def get_cache_info_from_response(response: requests.Response) -> Dict[str, Any]:
    """Gets cache information from a requests-cache response.

    Returns a dict matching CacheInfoMixin's fields: fetched_at, cached_at,
    cache_expires_at, from_cache. A plain (uncached) requests.Response lacks
    the requests-cache attributes, yielding None timestamps and
    from_cache=False.
    """
    cached_at = None
    expires_at = None

    # requests-cache attaches `created_at` to responses served from its cache.
    if hasattr(response, 'created_at') and response.created_at: # type: ignore
        cached_at_naive = response.created_at # type: ignore
        # assumes requests-cache timestamps are naive UTC — TODO confirm for
        # the installed requests-cache version before relying on this.
        cached_at = cached_at_naive.replace(tzinfo=timezone.utc) if cached_at_naive else None

    if hasattr(response, 'expires') and response.expires: # type: ignore
        expires_at_naive = response.expires # type: ignore
        expires_at = expires_at_naive.replace(tzinfo=timezone.utc) if expires_at_naive else None
    elif cached_at: # If created_at exists but expires is None, calculate based on default
        # NOTE(review): this 21600s fallback is hard-coded; the cache is
        # actually installed with `expire_after=cachetime` derived from the
        # CACHE_EXPIRE_AFTER env var, so the two can disagree when the env
        # var is set and the settings lookup below fails.
        default_expiry_seconds = 21600 # Default from install_cache
        if hasattr(requests_cache.get_cache(), 'settings') and requests_cache.get_cache().settings.expire_after is not None: # type: ignore
            default_expiry_seconds = requests_cache.get_cache().settings.expire_after # type: ignore
        expires_at = cached_at + timedelta(seconds=default_expiry_seconds)

    return {
        "fetched_at": datetime.now(timezone.utc),
        "cached_at": cached_at,
        "cache_expires_at": expires_at,
        "from_cache": getattr(response, 'from_cache', False)
    }
857
+
858
def make_full_model_name(namespace: str, model_base_name: str) -> str:
    """Build a model's display name: the bare base name for the official
    'library' namespace, otherwise '<namespace>/<base name>'."""
    is_official = namespace == "library"
    return model_base_name if is_official else f"{namespace}/{model_base_name}"
862
+
863
def make_full_tag_name(namespace: str, model_base_name: str, tag_part: str) -> str:
    """Build '<full model name>:<tag>' using the same namespace rules as
    make_full_model_name."""
    return ":".join((make_full_model_name(namespace, model_base_name), tag_part))
866
+
867
def parse_pull_count(pull_str: str) -> int:
    """Convert a human-readable pull count (e.g. '1.2M', '17K', '4,300') to an int.

    Returns 0 for empty or unparseable input. The original implementation
    only guarded the plain-integer branch, so malformed suffixed values such
    as 'm' or '1.2.3k' raised an uncaught ValueError from float(); all
    conversions are now inside the try block.
    """
    pull_str = pull_str.lower().replace(',', '').strip()
    if not pull_str:
        return 0
    try:
        if 'm' in pull_str:
            return int(float(pull_str.replace('m', '')) * 1_000_000)
        if 'k' in pull_str:
            return int(float(pull_str.replace('k', '')) * 1_000)
        return int(pull_str)
    except ValueError:
        return 0
878
+
879
def parse_relative_date_to_datetime(relative_str: str, base_time: Optional[datetime] = None) -> datetime:
    """Convert a relative date ('2 hours ago', 'yesterday', 'just now') to an
    absolute timezone-aware datetime.

    Args:
        relative_str: Relative description as scraped from ollama.com.
        base_time: Reference point; defaults to the current UTC time.
            (A None sentinel fixes the original bug where
            ``datetime.now(timezone.utc)`` was evaluated once at import time,
            freezing the reference for the whole process lifetime.)

    Returns:
        base_time shifted back by the parsed amount, or base_time unchanged
        (with a printed warning) when the string cannot be parsed.
    """
    if base_time is None:
        base_time = datetime.now(timezone.utc)
    relative_str = relative_str.lower().strip()
    if "just now" in relative_str or "moments ago" in relative_str:
        return base_time
    if "yesterday" in relative_str:
        # Set time to midnight of yesterday
        return base_time.replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=1)

    match = re.match(r"(\d+)\s+(minute|hour|day|week|month|year)s?\s+ago", relative_str)
    if match:
        value = int(match.group(1))
        unit = match.group(2)
        if unit == "minute": return base_time - timedelta(minutes=value)
        if unit == "hour": return base_time - timedelta(hours=value)
        if unit == "day": return base_time - timedelta(days=value)
        if unit == "week": return base_time - timedelta(weeks=value)
        # Approximation for month/year (no fixed calendar length)
        if unit == "month": return base_time - timedelta(days=value * 30)
        if unit == "year": return base_time - timedelta(days=value * 365)

    print(f"Warning: Could not parse relative date '{relative_str}'. Returning base_time.")
    return base_time
901
+
902
def parse_ollama_absolute_date_str(date_str: str) -> datetime:
    """Parse an absolute ollama.com date such as 'Mar 05, 2024 03:04 PM UTC'
    into a timezone-aware UTC datetime; falls back to now() on failure."""
    cleaned = date_str.replace(" UTC", "").strip()
    try:
        parsed = datetime.strptime(cleaned, '%b %d, %Y %I:%M %p')
    except ValueError as e:
        print(f"Warning: Could not parse absolute date string '{date_str}': {e}. Using current time.")
        return datetime.now(timezone.utc)
    return parsed.replace(tzinfo=timezone.utc)
909
+
910
def parse_size_str_to_bytes(size_str: str) -> int:
    """Convert a human-readable size string ('4.7GB', '120 MB') to bytes.

    Returns 0 for empty or non-numeric input. Both decimal (KB/MB/...) and
    binary (KiB/MiB/...) unit names are treated as powers of 1024; a value
    with no recognised unit is taken to already be in bytes.
    """
    if not size_str:
        return 0
    normalized = size_str.upper().strip()

    # Strip everything except digits and the decimal point.
    digits = re.sub(r"[^0-9.]", "", size_str)
    if not digits:
        return 0
    try:
        magnitude = float(digits)
    except ValueError:
        return 0

    unit_table = (
        (("KB", "KIB"), 1024),
        (("MB", "MIB"), 1024 ** 2),
        (("GB", "GIB"), 1024 ** 3),
        (("TB", "TIB"), 1024 ** 4),
    )
    for names, factor in unit_table:
        if any(name in normalized for name in names):
            return int(magnitude * factor)
    # No unit suffix: treat the number as plain bytes.
    return int(magnitude)
930
+
931
+ # --- HTML Parsing Functions ---
932
+
933
def parse_model_listing_item(item_li: BeautifulSoup, base_url: str) -> Optional[Dict[str, Any]]:
    """Parse one <li> from a model listing/search page into a plain dict.

    Extracts the model's namespace/base name (first guessed from the link URL,
    then refined from the visible title), description, pull count, tag count,
    last-updated time, capabilities and sizes.

    Returns:
        A dict of model fields, or None when the item has no usable link or
        no model name can be determined.
    """
    anchor = item_li.select_one('a[href]')
    if not anchor: return None

    raw_source_url = anchor['href']
    source_url = urljoin(base_url, raw_source_url)

    # First guess namespace / base name from the link path.
    path_parts = urlparse(source_url).path.strip('/').split('/')
    namespace = "library" # Default
    model_base_name = ""

    if len(path_parts) >= 2:
        if path_parts[0].lower() == 'library' and len(path_parts) > 1:
            namespace = 'library'
            model_base_name = path_parts[1].lower()
        elif path_parts[0].lower() != 'library': # User namespace
            namespace = path_parts[0].lower()
            model_base_name = path_parts[1].lower()

    # Robust parsing of model name from title or URL
    title_h2_span = item_li.select_one('h2 span[x-test-search-response-title]')
    if title_h2_span:
        full_name_from_title = title_h2_span.get_text(strip=True)
        if '/' in full_name_from_title:
            ns_from_title, mbn_from_title = full_name_from_title.split('/',1)
            namespace = ns_from_title.lower()
            model_base_name = mbn_from_title.lower()
        elif namespace == "library": # If it's a library model, title is just base_name
            model_base_name = full_name_from_title.lower()

    # BUG FIX: the original called item_li.select_one('h2').get_text() with no
    # None check, raising AttributeError for items lacking an <h2>. A missing
    # <h2> is now treated the same as a title without a '/' separator.
    h2_el = item_li.select_one('h2')
    if not model_base_name or (namespace != "library" and (h2_el is None or '/' not in h2_el.get_text())): # Refine fallback check
        title_div = item_li.select_one('div[x-test-model-title]')
        if title_div and title_div.has_attr('title'):
            full_name_from_title_attr = title_div['title'].lower()
            if '/' in full_name_from_title_attr :
                namespace, model_base_name = full_name_from_title_attr.split('/',1)
            else:
                model_base_name = full_name_from_title_attr

    if not model_base_name:
        return None

    description_p = item_li.select_one('p.max-w-lg.break-words')
    description = description_p.get_text(separator=" ", strip=True) if description_p else ""

    pull_count_span = item_li.select_one('span[x-test-pull-count]')
    pull_count_str = pull_count_span.get_text(strip=True) if pull_count_span else "0"
    pull_count = parse_pull_count(pull_count_str)

    tags_count_span = item_li.select_one('span[x-test-tag-count]')
    tags_count = int(tags_count_span.get_text(strip=True)) if tags_count_span and tags_count_span.get_text(strip=True).isdigit() else 0

    # Prefer the absolute timestamp from the wrapping span's title attribute;
    # fall back to parsing the visible relative text ("3 days ago").
    last_updated_iso_datetime = datetime.now(timezone.utc)
    last_updated_str = ""
    updated_span_el = item_li.select_one('span[x-test-updated]')
    if updated_span_el:
        last_updated_str = updated_span_el.get_text(strip=True)
        parent_title_span = updated_span_el.find_parent('span', title=True)
        if parent_title_span and parent_title_span['title']:
            try:
                last_updated_iso_datetime = parse_ollama_absolute_date_str(parent_title_span['title'])
            except ValueError:
                last_updated_iso_datetime = parse_relative_date_to_datetime(last_updated_str)
        else:
            last_updated_iso_datetime = parse_relative_date_to_datetime(last_updated_str)

    capabilities = [cap.get_text(strip=True).lower() for cap in item_li.select('span[x-test-capability]')]
    sizes = [size.get_text(strip=True).lower() for size in item_li.select('span[x-test-size]')]

    return {
        "source_url": source_url,
        "namespace": namespace,
        "model_base_name": model_base_name,
        "name_full_model": make_full_model_name(namespace, model_base_name),
        "description": description,
        "pull_count_str": pull_count_str,
        "pull_count": pull_count,
        "tags_count": tags_count,
        "last_updated_str": last_updated_str,
        "last_updated_iso": last_updated_iso_datetime.isoformat(),
        "capabilities": capabilities,
        "sizes": sizes,
    }
1017
+
1018
def parse_list_or_search_page_html(html_content: str, base_url: str = OLLAMA_COM_BASE_URL) -> List[Dict[str, Any]]:
    """Parse a library/search results page into a list of model dicts.

    Each <li x-test-model> entry is delegated to parse_model_listing_item;
    entries that fail to parse are dropped.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    return [
        parsed
        for li in soup.select('ul[role="list"] li[x-test-model]')
        if (parsed := parse_model_listing_item(li, base_url))
    ]
1027
+
1028
def parse_model_page_html(html_content: str, page_url: str) -> Dict[str, Any]:
    """Parse a model (or model:tag) detail page into a dict of model fields.

    Heuristically extracts namespace/base-name/tag (from the URL, refined by
    the page title), summary, pull count, last-updated time, capabilities,
    sizes, the active tag and its run command, the file-explorer listing,
    the README HTML, and the tag dropdown summary.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # --- Namespace / base name / tag guessed from the URL path first ---
    path_parts = urlparse(page_url).path.strip('/').split('/')
    namespace = "library"
    model_base_name_from_url = ""
    active_tag_part_from_url = None

    if len(path_parts) >= 2:
        ns_or_model = path_parts[0].lower()
        model_or_tag = path_parts[1].lower()

        if len(path_parts) >= 3 and path_parts[1].lower() != 'tags':
            namespace = ns_or_model
            model_tag_combo = model_or_tag
            if ':' in model_tag_combo:
                model_base_name_from_url, active_tag_part_from_url = model_tag_combo.split(':', 1)
            else:
                model_base_name_from_url = model_tag_combo
        elif len(path_parts) >= 2 and path_parts[0].lower() == 'library':
            namespace = 'library'
            model_tag_combo = model_or_tag
            if ':' in model_tag_combo:
                model_base_name_from_url, active_tag_part_from_url = model_tag_combo.split(':', 1)
            else:
                model_base_name_from_url = model_tag_combo
        elif len(path_parts) >= 2 and path_parts[0].lower() != 'library': # user/model (no tag)
            namespace = ns_or_model
            model_base_name_from_url = model_or_tag

    # --- Refine name from the on-page title, which wins over the URL guess ---
    model_name_a = soup.select_one('a[x-test-model-name][title]')
    model_base_name = model_name_a['title'].lower() if model_name_a else model_base_name_from_url

    if namespace == 'library' and model_name_a and '/' in model_name_a['title']:
        ns_from_title, mbn_from_title = model_name_a['title'].lower().split('/', 1)
        namespace = ns_from_title
        model_base_name = mbn_from_title
    elif namespace != 'library' and model_name_a and '/' not in model_name_a['title'] and not model_base_name:
        model_base_name = model_name_a['title'].lower() # Case: namespace from URL, model base name from title
    elif not model_base_name and model_name_a: # General fallback if model_base_name is still not set
        model_base_name = model_name_a['title'].lower()

    # --- Summary: primary element, then the edit-form textarea as fallback ---
    summary_span = soup.select_one('#summary-content span, #summary-content')
    summary = summary_span.get_text(separator=" ", strip=True) if summary_span else "Summary not found."
    if not summary.strip() or summary.strip().lower() == "no summary":
        summary_textarea = soup.select_one('#summary-textarea')
        if summary_textarea:
            summary = summary_textarea.get_text(separator=" ", strip=True)

    pull_count_span = soup.select_one('span[x-test-pull-count]')
    pull_count_str = pull_count_span.get_text(strip=True) if pull_count_span else "0"
    pull_count = parse_pull_count(pull_count_str)

    # --- Last-updated: absolute title attribute preferred, relative text as fallback ---
    updated_span_relative = soup.select_one('span[x-test-updated]')
    last_updated_str = ""
    last_updated_iso = datetime.now(timezone.utc)

    if updated_span_relative:
        last_updated_str = updated_span_relative.get_text(strip=True)
        parent_title_span = updated_span_relative.find_parent('span', title=True)
        if parent_title_span and parent_title_span['title']:
            try:
                last_updated_iso = parse_ollama_absolute_date_str(parent_title_span['title'])
            except ValueError: last_updated_iso = parse_relative_date_to_datetime(last_updated_str)
        else: last_updated_iso = parse_relative_date_to_datetime(last_updated_str)

    capabilities = [cap.get_text(strip=True).lower() for cap in soup.select('div.flex-wrap span.bg-indigo-50')]
    sizes = [size.get_text(strip=True).lower() for size in soup.select('span[x-test-size]')]

    # --- Active tag: URL wins; otherwise the tag-selection widget / run command ---
    tag_selection_section = soup.select_one('section[x-test-model-tag-selection]')
    active_tag_part = active_tag_part_from_url
    tag_command = None

    if tag_selection_section:
        active_tag_button_div = tag_selection_section.select_one('button[name="tag"] div.truncate')
        if active_tag_button_div and not active_tag_part:
            active_tag_part = active_tag_button_div.get_text(strip=True).lower()

        command_input = tag_selection_section.select_one('input.command[name="command"]')
        if command_input:
            tag_command = command_input['value']
            if not active_tag_part and tag_command and ":" in tag_command:
                active_tag_part = tag_command.split(":")[-1].lower()
            elif not active_tag_part and tag_command and model_base_name in tag_command:
                # A bare "ollama run <model>" command implies the "latest" tag.
                run_command_parts = tag_command.strip().split()
                expected_full_model_name = make_full_model_name(namespace, model_base_name)
                if len(run_command_parts) == 3 and run_command_parts[0] == 'ollama' and run_command_parts[1] == 'run' and run_command_parts[2] == expected_full_model_name:
                    active_tag_part = "latest"

    # --- File explorer listing (per-tag blob summaries) ---
    tag_files_summary = []
    file_explorer_section = soup.select_one('#file-explorer section')
    if file_explorer_section:
        listing_updated_str_fe = ""
        #listing_updated_iso_fe = None # Not used for now
        updated_p_fe = file_explorer_section.select_one('div.bg-neutral-50 > p:first-of-type')
        if updated_p_fe:
            listing_updated_str_raw_fe = updated_p_fe.get_text(strip=True)
            if "Updated" in listing_updated_str_raw_fe:
                listing_updated_str_fe = listing_updated_str_raw_fe.replace("Updated","").strip()
            elif re.match(r"\d+ \w+ ago", listing_updated_str_raw_fe): # Handles "X days ago"
                listing_updated_str_fe = listing_updated_str_raw_fe

        for file_a in file_explorer_section.select('a.group.block.grid-cols-12'):
            name_div = file_a.select_one('div.sm\\:col-span-2')
            name = name_div.get_text(strip=True).lower() if name_div else "unknown"

            blob_url_href = file_a['href']
            blob_url = urljoin(OLLAMA_COM_BASE_URL, blob_url_href)

            # Derive the digest from a ".../blobs/<digest>" URL; require at
            # least 12 hex chars, otherwise discard it.
            url_path_parts = urlparse(blob_url).path.strip('/').split('/')
            digest_from_url = None
            if len(url_path_parts) > 1 and url_path_parts[-2] == "blobs":
                digest_from_url = url_path_parts[-1]
                if not re.match(r"^[0-9a-fA-F]{12,}$", digest_from_url): digest_from_url = None

            size_div = file_a.select_one('div.sm\\:col-start-12')
            size_str = size_div.get_text(strip=True) if size_div else "0B"

            snippet_div = file_a.select_one('div.sm\\:col-span-8')
            snippet = snippet_div.get_text(separator=" ", strip=True) if snippet_div else ""

            tag_files_summary.append(FileSummary(
                name=name, blob_url=blob_url, digest=digest_from_url,
                size_str=size_str, snippet=snippet, updated_str=listing_updated_str_fe
            ))

    readme_div = soup.select_one('#readme #display')
    readme_content = str(readme_div) if readme_div else "<p>Readme not found.</p>"

    # --- Tag dropdown: collect tag summaries, possibly discovering the active tag ---
    all_tags_dropdown_summary = []
    tags_nav = soup.select_one('#tags-nav')
    if tags_nav:
        for tag_a_dropdown in tags_nav.select(f'a[href^="/library/"], a[href^="/{namespace}/"]'):
            if "View all" in tag_a_dropdown.get_text(): continue

            tag_name_span = tag_a_dropdown.select_one('span.truncate span.group-hover\\:underline')
            tag_part_from_dropdown = tag_name_span.get_text(strip=True).lower() if tag_name_span else ""

            size_span_dropdown = tag_a_dropdown.select_one('span.text-xs.text-neutral-400')
            size_str_from_dropdown = size_span_dropdown.get_text(strip=True) if size_span_dropdown else None

            # A highlighted entry marks the currently-selected tag.
            is_active_tag_dropdown = ('bg-neutral-100' in tag_a_dropdown.get('class', []))

            if not active_tag_part and is_active_tag_dropdown:
                active_tag_part = tag_part_from_dropdown

            all_tags_dropdown_summary.append(TagSummary(
                tag_part=tag_part_from_dropdown,
                name_full_tag=make_full_tag_name(namespace, model_base_name, tag_part_from_dropdown),
                size_str=size_str_from_dropdown,
                is_active=(active_tag_part == tag_part_from_dropdown)
            ))

    # If still undecided, default the active tag to the first dropdown entry;
    # otherwise re-sync every entry's is_active flag against the decided tag.
    if not active_tag_part and all_tags_dropdown_summary:
        active_tag_part = all_tags_dropdown_summary[0].tag_part
        all_tags_dropdown_summary[0].is_active = True
    elif active_tag_part:
        found_active = False
        for ts in all_tags_dropdown_summary:
            ts.is_active = (ts.tag_part == active_tag_part)
            if ts.is_active:
                found_active = True

    active_tag_full_name = make_full_tag_name(namespace, model_base_name, active_tag_part) if active_tag_part else None

    # --- "All tags" page link and its advertised tag count ---
    all_tags_page_link = soup.select_one('a[x-test-tags-link]')
    all_tags_page_url_str = f"{OLLAMA_COM_BASE_URL}/{namespace}/{model_base_name}/tags" # Default construction
    if all_tags_page_link and all_tags_page_link.has_attr('href'):
        all_tags_page_url_str = urljoin(OLLAMA_COM_BASE_URL, all_tags_page_link['href'])

    total_tags_count_from_link = 0
    if all_tags_page_link:
        count_match = re.search(r'(\d+)\s+Tags', all_tags_page_link.get_text())
        if count_match: total_tags_count_from_link = int(count_match.group(1))

    return {
        "name_full_model": make_full_model_name(namespace, model_base_name),
        "namespace": namespace,
        "model_base_name": model_base_name,
        "active_tag_part": active_tag_part,
        "active_tag_full_name": active_tag_full_name,
        "source_url": page_url, "summary": summary, "pull_count_str": pull_count_str,
        "pull_count": pull_count, "last_updated_str": last_updated_str,
        "last_updated_iso": last_updated_iso.isoformat(), "capabilities": capabilities,
        "sizes": sizes, "readme_content": readme_content, "tag_command": tag_command,
        "tag_files_summary": tag_files_summary,
        "all_tags_dropdown_summary": all_tags_dropdown_summary,
        "all_tags_page_url": all_tags_page_url_str,
        "total_tags_count_from_link": total_tags_count_from_link
    }
1225
+
1226
+
1227
def parse_all_tags_page_html(html_content: str, page_url: str, model_namespace: str, model_base_name_in: str) -> Dict[str, Any]:
    """Parse a model's /tags listing page into a dict with a list of TagDetailItem.

    Handles both the desktop grid layout and the mobile bullet-separated
    layout. Entries whose anchor text does not match the expected
    "<namespace>/<model>[:tag]" full name are skipped.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    tags_list = []

    list_items = soup.select('ul > li.group.p-3')
    for item_li in list_items:
        tag_anchor = item_li.select_one('a.hover\\:underline')
        if not tag_anchor or not tag_anchor.has_attr('href'): continue

        # Anchor text is "<full model name>:<tag>"; a bare model name means "latest".
        full_tag_name_text_raw = tag_anchor.get_text(strip=True)
        expected_full_model_name = make_full_model_name(model_namespace, model_base_name_in).lower()
        if ':' in full_tag_name_text_raw:
            parts = full_tag_name_text_raw.split(':',1) # Split only on first colon
            if len(parts) == 2 and parts[0].lower() == expected_full_model_name:
                tag_part = parts[1]
            else:
                continue
        else:
            if full_tag_name_text_raw.lower() == expected_full_model_name:
                tag_part = "latest"
            else:
                continue

        source_url = urljoin(OLLAMA_COM_BASE_URL, tag_anchor['href'])

        digest_span = item_li.select_one('div.font-mono.text-\\[13px\\]')
        digest = digest_span.get_text(strip=True) if digest_span else "unknown-digest"

        size_str, context_window_str, input_type, modified_str = "N/A", None, None, "N/A"
        modified_iso = datetime.now(timezone.utc)

        # Desktop layout: fixed grid columns (size / context / input / modified).
        details_div = item_li.select_one('div.hidden.md\\:grid')
        if details_div:
            cols = details_div.select('div.grid.grid-cols-12 > div')
            if len(cols) > 1: size_str = cols[1].get_text(strip=True)
            if len(cols) > 2: context_window_str = cols[2].get_text(strip=True) if cols[2].get_text(strip=True) != '-' else None
            if len(cols) > 3: input_type = cols[3].get_text(strip=True) if cols[3].get_text(strip=True) != '-' else None
            if len(cols) > 4:
                modified_str = cols[4].get_text(strip=True)
                # Absolute timestamp in the title attribute wins over the
                # visible relative text.
                modified_span_title = cols[4].select_one('span[title]')
                if modified_span_title and modified_span_title.has_attr('title'):
                    try:
                        modified_iso = parse_ollama_absolute_date_str(modified_span_title['title'])
                    except ValueError:
                        modified_iso = parse_relative_date_to_datetime(modified_str)
                else:
                    modified_iso = parse_relative_date_to_datetime(modified_str)

        else:
            # Mobile layout: one span with "•"-separated fields, classified by keyword.
            mobile_details_span = item_li.select_one('a.md\\:hidden span:not([class*="group-hover:underline"])')
            if mobile_details_span:
                all_texts = [s.strip() for s in mobile_details_span.find_all(string=True, recursive=True) if s.strip()]
                full_text = " ".join(all_texts)
                parts = [p.strip() for p in full_text.split('•')]

                if len(parts) > 0:
                    digest_match = re.search(r"[0-9a-f]{7,}", parts[0]) # Shorter match for mobile digest
                    if digest_match: digest = digest_match.group(0)

                for part_idx, part_text in enumerate(parts):
                    part_lower = part_text.lower()
                    if 'gb' in part_lower or 'mb' in part_lower or 'kb' in part_lower: size_str = part_text
                    elif 'context' in part_lower: context_window_str = part_text.replace('context','').strip()
                    elif 'input' in part_lower: input_type = part_text.replace('input','').strip()
                    elif any(kw in part_lower for kw in ['ago', 'yesterday', 'now', 'updated', 'modified']):
                        modified_str = part_text
                        modified_iso = parse_relative_date_to_datetime(modified_str)

        # NOTE(review): ':contains(...)' is a legacy soupsieve alias deprecated
        # in favor of ':-soup-contains(...)' — confirm the installed soupsieve
        # version still accepts it.
        is_default_badge = item_li.select_one('span.text-blue-600:contains("Default")')
        is_default = bool(is_default_badge)

        tags_list.append(TagDetailItem(
            name_full_tag=make_full_tag_name(model_namespace, model_base_name_in, tag_part),
            tag_part=tag_part, source_url=source_url, digest=digest,
            size_str=size_str, size_bytes=parse_size_str_to_bytes(size_str),
            context_window_str=context_window_str if context_window_str and context_window_str != '-' else None,
            input_type=input_type if input_type and input_type != '-' else None,
            modified_str=modified_str, modified_iso=modified_iso, is_default=is_default
        ))

    return {
        "name_full_model": make_full_model_name(model_namespace, model_base_name_in),
        "namespace": model_namespace,
        "model_base_name": model_base_name_in,
        "tags_page_url": page_url,
        "tags": tags_list
    }
1316
+
1317
+
1318
def parse_blob_content_page_html(html_content: str) -> Optional[str]:
    """Extract the text of the first <pre> element on a blob content page.

    Used for textual blobs such as params or template; returns None when the
    page has no (non-empty) <pre> element.
    """
    pre = BeautifulSoup(html_content, 'html.parser').select_one('pre')
    return pre.get_text() if pre else None
1325
+
1326
def parse_gguf_metadata_from_snippet(snippet: str) -> Optional[GGUFMetadata]:
    """Heuristically extract arch / parameters / quantization from a file snippet.

    Accepts explicit "key:value" tokens as well as bare tokens classified by
    shape (word → arch, "8b"-style → parameters, "q4_0"-style → quantization).
    Returns None when nothing recognizable is found.
    """
    if not snippet or not isinstance(snippet, str): return None

    metadata = {}
    # NOTE(review): splitting on spaces as well as '·'/',' means a value
    # separated from its key by a space (e.g. "arch: llama") loses the value
    # to the bare-token heuristics below — confirm against real snippets.
    parts = re.split(r'[·, ]+', snippet.lower().strip())

    for part in parts:
        if part.startswith("arch:"):
            metadata["arch"] = part.replace("arch:", "").strip()
        elif part.startswith("parameters:"):
            metadata["parameters"] = part.replace("parameters:", "").strip().upper()
        elif part.startswith("quantization:"):
            metadata["quantization"] = part.replace("quantization:", "").strip().upper()
        elif re.match(r"^[^:]+$", part):
            # Bare token: classify by shape, first match per field wins.
            if re.match(r"^\w+$", part) and "arch" not in metadata and not any(c.isdigit() for c in part): metadata["arch"] = part
            elif re.match(r"^\d+(\.\d+)?[a-z]+$", part) and "parameters" not in metadata: metadata["parameters"] = part.upper()
            elif (re.match(r"^[fq]\d+(_\d+|[a-z_]+)?$", part) or part in ["q2_k", "q3_k_s", "q3_k_m", "q3_k_l", "q4_0", "q4_1", "q4_k_s", "q4_k_m", "q5_0", "q5_1", "q5_k_s", "q5_k_m", "q6_k", "q8_0"]) and "quantization" not in metadata: metadata["quantization"] = part.upper()

    return GGUFMetadata(**metadata) if metadata else None
1346
+
1347
+
1348
# --- API Endpoints (Reordered for FastAPI matching) ---


app = FastAPI()

# Rolling window of the most recently recorded ping durations, in seconds
# (at most 100 kept; older entries are evicted by the deque).
ping_durations = deque(maxlen=100)

# Running total of bytes sent by /ping responses (simulated bandwidth usage).
total_bytes_sent = 0
1359
+
1360
@app.get("/ping")
async def ping(request: Request):
    """Health-check endpoint that also reports simple traffic metrics.

    Records how long the handler took (into the module-level ping_durations
    deque) and accumulates the size of the nominal "pong" payload into
    total_bytes_sent, then returns the metrics as JSON.
    """
    import time  # local import keeps this fix self-contained

    global total_bytes_sent

    started = time.perf_counter()

    # Simulated pong response; built only so its serialized size can be measured.
    response_data = {"message": "pong"}
    response = JSONResponse(content=response_data)

    # Approximate response size in bytes
    response_body = response.body
    response_size = len(response_body)
    total_bytes_sent += response_size

    # BUG FIX: ping_durations was declared but never appended to, so
    # "total_pings" always reported 0. Record this request's duration (seconds).
    ping_durations.append(time.perf_counter() - started)

    metrics = {
        "status": "pong",
        "total_pings": len(ping_durations),
        "total_bandwidth_sent_kb": round(total_bytes_sent / 1024, 2),
        "last_response_size_bytes": response_size
    }

    return JSONResponse(content=metrics)
1381
+
1382
+
1383
@app.get("/", include_in_schema=False)
async def read_index():
    """Serve the landing page.

    Returns the in-memory dummy HTML with the version placeholder substituted.
    The previous static-file lookup (script_dir / "static" / FILETEMPTEXT) sat
    after the return statement and was unreachable dead code; it has been
    removed.
    """
    return HTMLResponse(content=dummy_html_content.replace("VERSION_BEING_REPLACED", CODE_VERSION)) # Had a lot of errors, just return html to fix everything ;)
1391
+
1392
+
1393
@app.get("/search", response_model=SearchResponse, summary="Search Models")
async def search_models(
    q: str = Query(..., description="The search query."),
    o: Optional[str] = Query("popular", description="Sort order: 'popular' or 'newest'."),
    c: Optional[str] = Query(None, description="Comma-separated list of capabilities to filter by.")
):
    """Proxy a search against ollama.com and return the parsed model listing.

    Results are optionally post-filtered so that only models exposing every
    requested capability are returned.
    """
    if o not in ("popular", "newest"):
        raise HTTPException(status_code=400, detail="Invalid sort order 'o'. Must be 'popular' or 'newest'.")

    query_params = {"q": q, "o": o}
    if c:
        query_params["c"] = c.lower()

    try:
        upstream = cached_session.get(f"{OLLAMA_COM_BASE_URL}/search", params=query_params)
        upstream.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=503, detail=f"Failed to fetch search results from ollama.com: {e}")

    cache_info = get_cache_info_from_response(upstream)
    raw_items = parse_list_or_search_page_html(upstream.text, base_url=OLLAMA_COM_BASE_URL)

    # With a capability filter, keep only models whose capability set covers
    # every requested capability.
    wanted_caps = None
    if c:
        wanted_caps = {cap.strip().lower() for cap in c.split(',')}
        matched = [
            ModelResultItem(**item)
            for item in raw_items
            if wanted_caps.issubset(set(item.get("capabilities", [])))
        ]
    else:
        matched = [ModelResultItem(**item) for item in raw_items]

    return SearchResponse(
        query=q, sort_order=o,
        filters=FilterInfo(capabilities=sorted(wanted_caps)) if wanted_caps else None,
        results=matched,
        **cache_info
    )
1434
+
1435
@app.get("/{namespace}/{model_base_name}:{tag_part}/blobs/{blob_identifier}", response_model=BlobDetailsResponse, summary="Get Blob Information")
async def get_blob_information(
    namespace: str = FastApiPath(..., description="Model namespace."),
    model_base_name: str = FastApiPath(..., description="Base name of the model."),
    tag_part: str = FastApiPath(..., description="The tag part (e.g., '8b')."),
    blob_identifier: str = FastApiPath(..., description="Blob filename (e.g. 'model', 'params', 'template') or digest.")
):
    """Return details for one blob of a model tag, matched by filename or digest prefix.

    Fetches and parses the tag page (retrying under the 'library' namespace
    on 404), locates the blob in its file listing, and — for textual blobs —
    fetches the blob content itself.
    """
    norm_ns = namespace.lower()
    norm_mbn = model_base_name.lower()
    norm_tp = tag_part.lower()
    norm_bi = blob_identifier.lower()

    tag_page_url = f"{OLLAMA_COM_BASE_URL}/{norm_ns}/{norm_mbn}:{norm_tp}"
    try:
        tag_page_response = cached_session.get(tag_page_url)
        if tag_page_response.status_code == 404:
            # Fall back to the default 'library' namespace before giving up.
            if norm_ns != 'library':
                library_tag_url = f"{OLLAMA_COM_BASE_URL}/library/{norm_mbn}:{norm_tp}"
                tag_page_response = cached_session.get(library_tag_url)
                if tag_page_response.status_code == 404:
                    raise HTTPException(status_code=404, detail=f"Tag page for '{make_full_tag_name(namespace, model_base_name, tag_part)}' not found.")
                norm_ns = 'library'
                tag_page_url = library_tag_url
            else:
                raise HTTPException(status_code=404, detail=f"Tag page for '{make_full_tag_name(namespace, model_base_name, tag_part)}' not found.")
        tag_page_response.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=503, detail=f"Failed to fetch tag page: {e}")

    parsed_tag_page = parse_model_page_html(tag_page_response.text, tag_page_url)

    # Match by exact (lowercased) filename, or by digest prefix.
    found_file_summary: Optional[FileSummary] = None
    for fs in parsed_tag_page.get("tag_files_summary", []):
        if fs.name.lower() == norm_bi or (fs.digest and fs.digest.lower().startswith(norm_bi)):
            found_file_summary = fs
            break

    if not found_file_summary:
        raise HTTPException(status_code=404, detail=f"Blob identifier '{blob_identifier}' not found for tag '{make_full_tag_name(norm_ns, norm_mbn, norm_tp)}'.")

    text_content = None
    parsed_json_content = None
    gguf_metadata_snippet = None
    parsed_gguf_metadata = None
    blob_detail_url = found_file_summary.blob_url
    # Default cache info comes from the tag page; replaced below if the blob
    # content itself is fetched.
    blob_cache_info = get_cache_info_from_response(tag_page_response)
    text_blob_names = ["params", "template", "license", "modelfile"]

    if found_file_summary.name in text_blob_names:
        # Textual blob: fetch its content, falling back to the listing
        # snippet on network failure.
        try:
            blob_content_response = cached_session.get(str(blob_detail_url))
            blob_content_response.raise_for_status()
            blob_cache_info = get_cache_info_from_response(blob_content_response)
            content_type = blob_content_response.headers.get("Content-Type", "")
            if "text/html" in content_type:
                text_content = parse_blob_content_page_html(blob_content_response.text)
            else:
                text_content = blob_content_response.text
            # "params" is JSON; parse it opportunistically.
            if text_content and found_file_summary.name == "params":
                try: parsed_json_content = json.loads(text_content)
                except json.JSONDecodeError: pass
        except requests.exceptions.RequestException as e:
            print(f"Warning: Could not fetch blob content from {blob_detail_url}: {e}. Using snippet.")
            text_content = found_file_summary.snippet
    elif found_file_summary.name == "model":
        # GGUF weights: only the listing snippet (arch/params/quant) is usable.
        gguf_metadata_snippet = found_file_summary.snippet
        if gguf_metadata_snippet:
            parsed_gguf_metadata = parse_gguf_metadata_from_snippet(gguf_metadata_snippet)
    else:
        text_content = found_file_summary.snippet

    listing_updated_iso_val = None
    if found_file_summary.updated_str: # updated_str is from the file listing section overall
        listing_updated_iso_val = parse_relative_date_to_datetime(found_file_summary.updated_str)

    return BlobDetailsResponse(
        name_full_tag=make_full_tag_name(norm_ns, norm_mbn, norm_tp),
        canonical_name=found_file_summary.name,
        source_url=blob_detail_url,
        digest=found_file_summary.digest or "unknown-digest",
        size_str=found_file_summary.size_str,
        text_content=text_content,
        parsed_json_content=parsed_json_content,
        gguf_metadata_snippet=gguf_metadata_snippet,
        parsed_gguf_metadata=parsed_gguf_metadata,
        listing_updated_str=found_file_summary.updated_str,
        listing_updated_iso=listing_updated_iso_val,
        **blob_cache_info
    )
1524
+
1525
@app.get("/{namespace}/{model_base_name}:{tag_part}", response_model=ModelPageResponse, summary="Get Specific Tag Details")
async def get_specific_tag_details(
    namespace: str = FastApiPath(..., description="Model namespace."),
    model_base_name: str = FastApiPath(..., description="Base name of the model."),
    tag_part: str = FastApiPath(..., description="The tag part (e.g., 'latest', '8b').")
):
    """Fetch and parse the page for a specific model:tag.

    Retries under the 'library' namespace on 404, then forces the parsed
    result's active tag to the requested one.
    """
    norm_ns = namespace.lower()
    norm_mbn = model_base_name.lower()
    norm_tp = tag_part.lower()
    tag_page_url = f"{OLLAMA_COM_BASE_URL}/{norm_ns}/{norm_mbn}:{norm_tp}"

    try:
        response = cached_session.get(tag_page_url)
        if response.status_code == 404:
            # Fall back to the default 'library' namespace before giving up.
            if norm_ns != 'library':
                library_tag_url = f"{OLLAMA_COM_BASE_URL}/library/{norm_mbn}:{norm_tp}"
                response = cached_session.get(library_tag_url)
                if response.status_code == 404:
                    raise HTTPException(status_code=404, detail=f"Tag page '{namespace}/{model_base_name}:{tag_part}' not found.")
                norm_ns = 'library'
                tag_page_url = library_tag_url
            else:
                raise HTTPException(status_code=404, detail=f"Tag page '{namespace}/{model_base_name}:{tag_part}' not found.")
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=503, detail=f"Failed to fetch specific tag page: {e}")

    cache_info = get_cache_info_from_response(response)
    parsed_data = parse_model_page_html(response.text, tag_page_url)
    # The requested tag is authoritative: override whatever the page parser
    # guessed and re-sync the dropdown's is_active flags.
    parsed_data['active_tag_part'] = norm_tp
    parsed_data['active_tag_full_name'] = make_full_tag_name(norm_ns, norm_mbn, norm_tp)
    for ts in parsed_data.get("all_tags_dropdown_summary", []):
        ts.is_active = (ts.tag_part == norm_tp)

    return ModelPageResponse(**parsed_data, **cache_info)
1560
+
1561
+
1562
@app.get("/{namespace}/{model_base_name}/tags", response_model=AllTagsResponse, summary="List All Tags for a Model")
async def list_all_tags(
    namespace: str = FastApiPath(..., description="Model namespace."),
    model_base_name: str = FastApiPath(..., description="Base name of the model.")
):
    """Fetch and parse the /tags listing page for a model.

    On a 404 under a user namespace, retries under the default 'library'
    namespace before reporting not-found.
    """
    ns = namespace.lower()
    base = model_base_name.lower()
    tags_page_url = f"{OLLAMA_COM_BASE_URL}/{ns}/{base}/tags"

    try:
        response = cached_session.get(tags_page_url)
        if response.status_code == 404:
            not_found = HTTPException(status_code=404, detail=f"Tags page for model '{namespace}/{model_base_name}' not found.")
            if ns == 'library':
                raise not_found
            # Retry under the default 'library' namespace before giving up.
            library_tags_url = f"{OLLAMA_COM_BASE_URL}/library/{base}/tags"
            response = cached_session.get(library_tags_url)
            if response.status_code == 404:
                raise not_found
            ns = 'library'
            tags_page_url = library_tags_url
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=503, detail=f"Failed to fetch tags page: {e}")

    cache_info = get_cache_info_from_response(response)
    parsed_data = parse_all_tags_page_html(response.text, tags_page_url, ns, base)

    return AllTagsResponse(**parsed_data, **cache_info)
1590
+
1591
+
1592
+ @app.get("/{namespace}/{model_base_name}", response_model=ModelPageResponse, summary="Get Model Details")
1593
+ async def get_model_details(
1594
+ namespace: str = FastApiPath(..., description="Model namespace."),
1595
+ model_base_name: str = FastApiPath(..., description="Base name of the model.")
1596
+ ):
1597
+ page_url = f"{OLLAMA_COM_BASE_URL}/{namespace.lower()}/{model_base_name.lower()}"
1598
+ try:
1599
+ response = cached_session.get(page_url)
1600
+ if response.status_code == 404:
1601
+ # Try with 'library' namespace if original was not 'library'
1602
+ if namespace.lower() != 'library':
1603
+ lib_page_url = f"{OLLAMA_COM_BASE_URL}/library/{model_base_name.lower()}"
1604
+ response = cached_session.get(lib_page_url)
1605
+ if response.status_code == 404:
1606
+ raise HTTPException(status_code=404, detail=f"Model '{namespace}/{model_base_name}' (and as 'library/{model_base_name}') not found.")
1607
+ page_url = lib_page_url # Use library URL if found
1608
+ else: # Original was 'library' and not found
1609
+ raise HTTPException(status_code=404, detail=f"Model '{namespace}/{model_base_name}' not found.")
1610
+ response.raise_for_status()
1611
+ except requests.exceptions.RequestException as e:
1612
+ raise HTTPException(status_code=503, detail=f"Failed to fetch model details: {e}")
1613
+
1614
+ cache_info = get_cache_info_from_response(response)
1615
+ parsed_data = parse_model_page_html(response.text, page_url)
1616
+ return ModelPageResponse(**parsed_data, **cache_info)
1617
+
1618
+
1619
+ @app.get("/{namespace}", response_model=ModelListByNamespaceResponse, summary="List Models by Namespace")
1620
+ async def list_models_by_namespace(
1621
+ namespace: str = FastApiPath(..., description="The model namespace (e.g., 'library', 'username')."),
1622
+ o: Optional[str] = Query('popular', description="Sort order: 'newest' or 'popular'."),
1623
+ c: Optional[str] = Query(None, description="Comma-separated list of capabilities to filter by.")
1624
+ ):
1625
+ if o not in ["newest", "popular"]:
1626
+ raise HTTPException(status_code=400, detail="Sort order 'o' must be 'newest' or 'popular'.")
1627
+
1628
+ norm_namespace = namespace.lower()
1629
+
1630
+ # Determine the actual path on ollama.com
1631
+ if norm_namespace == "library":
1632
+ ollama_com_path = "/library"
1633
+ else:
1634
+ ollama_com_path = f"/{norm_namespace}"
1635
+
1636
+ target_fetch_url = f"{OLLAMA_COM_BASE_URL}{ollama_com_path}"
1637
+
1638
+ params = {"sort": o} # ollama.com uses 'sort' for both /library and /{user} pages
1639
+ if c:
1640
+ params["c"] = c.lower()
1641
+
1642
+ try:
1643
+ response = cached_session.get(target_fetch_url, params=params)
1644
+ if response.status_code == 404:
1645
+ raise HTTPException(status_code=404, detail=f"Namespace '{namespace}' not found on ollama.com.")
1646
+ response.raise_for_status()
1647
+ except requests.exceptions.RequestException as e:
1648
+ raise HTTPException(status_code=503, detail=f"Failed to fetch models for namespace '{namespace}': {e}")
1649
+
1650
+ cache_info = get_cache_info_from_response(response)
1651
+ # The parse_list_or_search_page_html should work for both /library and /user_name pages
1652
+ # as the HTML structure for model listings is similar.
1653
+ parsed_results_data = parse_list_or_search_page_html(response.text, base_url=OLLAMA_COM_BASE_URL)
1654
+
1655
+ final_results = []
1656
+ capabilities_list_for_filterinfo = None
1657
+ if c:
1658
+ requested_caps = set(cap.strip().lower() for cap in c.split(','))
1659
+ capabilities_list_for_filterinfo = sorted(list(requested_caps))
1660
+ for model_data in parsed_results_data:
1661
+ model_caps_set = set(model_data.get("capabilities", []))
1662
+ if requested_caps.issubset(model_caps_set):
1663
+ final_results.append(ModelResultItem(**model_data))
1664
+ else:
1665
+ final_results = [ModelResultItem(**model_data) for model_data in parsed_results_data]
1666
+
1667
+ return ModelListByNamespaceResponse(
1668
+ queried_namespace=norm_namespace,
1669
+ sort_order=o,
1670
+ filters=FilterInfo(capabilities=capabilities_list_for_filterinfo) if capabilities_list_for_filterinfo else None,
1671
+ results=final_results,
1672
+ **cache_info
1673
+ )
1674
+
1675
+
1676
# NOTE(review): this module-level glue looks misplaced/broken — it runs at
# import time but depends on names not defined anywhere in this view:
#   * `format` is the *builtin* function unless a global string `format` is
#     assigned earlier in the file; calling `.startswith` on the builtin
#     raises AttributeError — confirm a `format` variable exists (and
#     consider renaming it, since it shadows the builtin).
#   * `Static_Website` and `FILETEMPTEXT` are not defined in this view —
#     verify they exist upstream (naming also breaks snake_case/UPPER_CASE
#     conventions).
#   * `filename` is only bound when `Static_Website` is falsy, yet the
#     __main__ block below uses it unconditionally — possible NameError.
if not format.startswith('.'):
    format = f'.{format}'
# Presumably clears a stale generated landing page when not serving a
# static website — TODO confirm intent.
if not Static_Website and os.path.exists('static/index.html'):
    os.remove('static/index.html')

if Static_Website == False:  # NOTE(review): prefer `if not Static_Website:`
    filename = f'{FILETEMPTEXT}'
1683
# --- Main execution ---
if __name__ == "__main__":
    print("Starting Ollama Library API (Live Fetch) server...")

    # Make sure the static asset directory exists next to this file.
    assets_dir = os.path.join(os.path.dirname(__file__), "static")
    if not os.path.exists(assets_dir):
        os.makedirs(assets_dir)
        print(f"Created static directory: {assets_dir}")

    landing_page_path = os.path.join(assets_dir, filename)
    if not os.path.exists(landing_page_path):
        # Stamp the running code version into the bundled HTML template,
        # then materialize it as the landing page.
        rendered_html = dummy_html_content.replace("VERSION_BEING_REPLACED", CODE_VERSION)
        with open(landing_page_path, "w") as fh:
            fh.write(rendered_html)
        print(f"Created dummy index.html at: {landing_page_path}")

    print("OpenAPI docs available at http://localhost:5115/docs")
    print("Landing page available at http://localhost:5115/")
    print("\nExample test URLs (fetches live data from ollama.com):")
    example_lines = (
        "  Models in 'library' namespace (popular): http://localhost:5115/library?o=popular",
        "  Models by user 'jmorganca' (popular): http://localhost:5115/jmorganca?o=popular",
        "  Models in 'library' (newest, filter vision): http://localhost:5115/library?o=newest&c=vision",
        "  Search 'qwen': http://localhost:5115/search?q=qwen&o=popular",
        "  Model details (library/qwen2): http://localhost:5115/library/qwen2",
        "  Model details (jmorganca/codellama): http://localhost:5115/jmorganca/codellama",
        "  Model specific tag (library/qwen2:7b): http://localhost:5115/library/qwen2:7b",
        "  All tags for (library/qwen2): http://localhost:5115/library/qwen2/tags",
        "  Blob info (library/qwen2:7b, model): http://localhost:5115/library/qwen2:7b/blobs/model",
        "  Blob info (library/qwen2:7b, params): http://localhost:5115/library/qwen2:7b/blobs/params",
    )
    for line in example_lines:
        print(line)

    uvicorn.run(app, host="0.0.0.0", port=5115)