abidlabs HF Staff commited on
Commit
1fcdcb3
·
verified ·
1 Parent(s): 4f35290

Upload folder using huggingface_hub

Browse files
Files changed (45) hide show
  1. .gitattributes +1 -0
  2. trackio/CHANGELOG.md +187 -0
  3. trackio/__init__.py +713 -0
  4. trackio/alerts.py +185 -0
  5. trackio/api.py +87 -0
  6. trackio/apple_gpu.py +253 -0
  7. trackio/assets/badge.png +0 -0
  8. trackio/assets/trackio_logo_dark.png +0 -0
  9. trackio/assets/trackio_logo_light.png +0 -0
  10. trackio/assets/trackio_logo_old.png +3 -0
  11. trackio/assets/trackio_logo_type_dark.png +0 -0
  12. trackio/assets/trackio_logo_type_dark_transparent.png +0 -0
  13. trackio/assets/trackio_logo_type_light.png +0 -0
  14. trackio/assets/trackio_logo_type_light_transparent.png +0 -0
  15. trackio/cli.py +1231 -0
  16. trackio/cli_helpers.py +158 -0
  17. trackio/commit_scheduler.py +310 -0
  18. trackio/context_vars.py +18 -0
  19. trackio/deploy.py +466 -0
  20. trackio/dummy_commit_scheduler.py +12 -0
  21. trackio/frontend/dist/assets/index-0Wf8YNCR.css +1 -0
  22. trackio/frontend/dist/assets/index-D1G_q77u.js +0 -0
  23. trackio/frontend/dist/index.html +14 -0
  24. trackio/frontend/eslint.config.js +42 -0
  25. trackio/frontend/index.html +13 -0
  26. trackio/frontend_server.py +63 -0
  27. trackio/gpu.py +357 -0
  28. trackio/histogram.py +71 -0
  29. trackio/imports.py +304 -0
  30. trackio/markdown.py +21 -0
  31. trackio/media/__init__.py +27 -0
  32. trackio/media/audio.py +167 -0
  33. trackio/media/image.py +84 -0
  34. trackio/media/media.py +79 -0
  35. trackio/media/utils.py +60 -0
  36. trackio/media/video.py +246 -0
  37. trackio/package.json +6 -0
  38. trackio/py.typed +0 -0
  39. trackio/remote_client.py +28 -0
  40. trackio/run.py +739 -0
  41. trackio/server.py +626 -0
  42. trackio/sqlite_storage.py +1821 -0
  43. trackio/table.py +173 -0
  44. trackio/typehints.py +39 -0
  45. trackio/utils.py +919 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ trackio/assets/trackio_logo_old.png filter=lfs diff=lfs merge=lfs -text
trackio/CHANGELOG.md ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # trackio
2
+
3
+ ## 0.20.0
4
+
5
+ ### Features
6
+
7
+ - [#450](https://github.com/gradio-app/trackio/pull/450) [`b0571ef`](https://github.com/gradio-app/trackio/commit/b0571ef6207a1ce346696f858ad2b7b584dd194f) - Use Svelte source for Gradio components directly in Trackio dashboard. Thanks @abidlabs!
8
+
9
+ ## 0.19.0
10
+
11
+ ### Features
12
+
13
+ - [#445](https://github.com/gradio-app/trackio/pull/445) [`cef4a58`](https://github.com/gradio-app/trackio/commit/cef4a583cb76f4091fc6c0e5783124ee84f8e243) - Add remote HF Space support to CLI. Thanks @abidlabs!
14
+ - [#444](https://github.com/gradio-app/trackio/pull/444) [`358f2a9`](https://github.com/gradio-app/trackio/commit/358f2a9ca238ee8b90b5a8c96220da287e0698fb) - Fix alerts placeholder flashing on reports page. Thanks @abidlabs!
15
+
16
+ ## 0.18.0
17
+
18
+ ### Features
19
+
20
+ - [#435](https://github.com/gradio-app/trackio/pull/435) [`4a47112`](https://github.com/gradio-app/trackio/commit/4a471128e18a39e45fad48a67fd711c5ae9e4aed) - feat: allow hiding section header accordions. Thanks @Saba9!
21
+ - [#439](https://github.com/gradio-app/trackio/pull/439) [`18e9650`](https://github.com/gradio-app/trackio/commit/18e96503d5a3a7cf926e92782d457e23c19942bd) - Add alerts with webhooks, CLI, and documentation. Thanks @abidlabs!
22
+ - [#438](https://github.com/gradio-app/trackio/pull/438) [`0875ccd`](https://github.com/gradio-app/trackio/commit/0875ccd3d8a41b1376f64030f21cfe8cdcc73b05) - Add "share this view" functionality. Thanks @qgallouedec!
23
+ - [#409](https://github.com/gradio-app/trackio/pull/409) [`9282403`](https://github.com/gradio-app/trackio/commit/9282403d8896d48679b0f888208a7ba5bdd4271a) - Add Apple Silicon GPU and system monitoring support. Thanks @znation!
24
+ - [#434](https://github.com/gradio-app/trackio/pull/434) [`4193223`](https://github.com/gradio-app/trackio/commit/41932230a3a2e1c16405dba08ecba5a42f11d1a8) - fix: table slider crash. Thanks @Saba9!
25
+
26
+ ### Fixes
27
+
28
+ - [#441](https://github.com/gradio-app/trackio/pull/441) [`3a2d11d`](https://github.com/gradio-app/trackio/commit/3a2d11dab0b4b37c925abc30ef84b0e2910321ee) - preserve x-axis step when toggling run checkboxes. Thanks @Saba9!
29
+
30
+ ## 0.17.0
31
+
32
+ ### Features
33
+
34
+ - [#428](https://github.com/gradio-app/trackio/pull/428) [`f7dd1ce`](https://github.com/gradio-app/trackio/commit/f7dd1ce2dc8a1936f9983467fcbcf93bfef01e09) - feat: add ability to rename runs. Thanks @Saba9!
35
+ - [#437](https://github.com/gradio-app/trackio/pull/437) [`2727c0b`](https://github.com/gradio-app/trackio/commit/2727c0b0755f48f7f186162ea45185c98f6b5516) - Add markdown reports across Trackio. Thanks @abidlabs!
36
+ - [#427](https://github.com/gradio-app/trackio/pull/427) [`5aeb9ed`](https://github.com/gradio-app/trackio/commit/5aeb9edcfd2068d309d9d64f172dcbcc327be1ab) - Make Trackio logging much more robust. Thanks @abidlabs!
37
+
38
+ ## 0.16.1
39
+
40
+ ### Features
41
+
42
+ - [#431](https://github.com/gradio-app/trackio/pull/431) [`c7ce55b`](https://github.com/gradio-app/trackio/commit/c7ce55b14dd5eb0c2165fb15df17dd60721c9325) - Lazy load the UI when trackio is imported. Thanks @abidlabs!
43
+
44
+ ## 0.16.0
45
+
46
+ ### Features
47
+
48
+ - [#426](https://github.com/gradio-app/trackio/pull/426) [`ead4dc8`](https://github.com/gradio-app/trackio/commit/ead4dc8e74ee2d8e47d61bca0a7668456acf49be) - Fix redundant double rendering of group checkboxes. Thanks @abidlabs!
49
+ - [#413](https://github.com/gradio-app/trackio/pull/413) [`39c4750`](https://github.com/gradio-app/trackio/commit/39c4750951d554ba6eb4d58847c6bb444b2891a8) - Check `dist-packages` when checking for source installation. Thanks @sergiopaniego!
50
+ - [#423](https://github.com/gradio-app/trackio/pull/423) [`2e52ab3`](https://github.com/gradio-app/trackio/commit/2e52ab303e3041718a6a56fbf84d0848aca9ad67) - Fix legend outline visibility issue. Thanks @Raghunath-Balaji!
51
+ - [#407](https://github.com/gradio-app/trackio/pull/407) [`c8a384d`](https://github.com/gradio-app/trackio/commit/c8a384ddfe5a295cecf862a26178d40e48acb424) - Fix pytests that were failing locally on MacOS. Thanks @abidlabs!
52
+ - [#405](https://github.com/gradio-app/trackio/pull/405) [`35aae4e`](https://github.com/gradio-app/trackio/commit/35aae4e3aa3e2b2888887528478b9dc6a9808bda) - Add conditional padding for HF Space dashboard when not in iframe. Thanks @znation!
53
+
54
+ ## 0.15.0
55
+
56
+ ### Features
57
+
58
+ - [#397](https://github.com/gradio-app/trackio/pull/397) [`6b38ad0`](https://github.com/gradio-app/trackio/commit/6b38ad02e5d73a0df49c4eede7e91331282ece04) - Adds `--host` cli option support. Thanks @abidlabs!
59
+ - [#396](https://github.com/gradio-app/trackio/pull/396) [`4a4d1ab`](https://github.com/gradio-app/trackio/commit/4a4d1ab85e63d923132a3fa7afa5d90e16431bec) - Fix run selection issue. Thanks @abidlabs!
60
+ - [#394](https://github.com/gradio-app/trackio/pull/394) [`c47a3a3`](https://github.com/gradio-app/trackio/commit/c47a3a31f8c4b83bce1aa7fc22eeba3d9021ad3d) - Add wandb-compatible API for trackio. Thanks @abidlabs!
61
+ - [#378](https://github.com/gradio-app/trackio/pull/378) [`b02046a`](https://github.com/gradio-app/trackio/commit/b02046a5b0dad7c9854e099a87f884afba4aecb2) - Add JSON export button for line plots and upgrade gradio dependency. Thanks @JamshedAli18!
62
+
63
+ ## 0.14.2
64
+
65
+ ### Features
66
+
67
+ - [#386](https://github.com/gradio-app/trackio/pull/386) [`f9452cd`](https://github.com/gradio-app/trackio/commit/f9452cdb8f0819368f3610f7ac0ed08957305275) - Fixing some issues related to deployed Trackio Spaces. Thanks @abidlabs!
68
+
69
+ ## 0.14.1
70
+
71
+ ### Features
72
+
73
+ - [#382](https://github.com/gradio-app/trackio/pull/382) [`44fe9bb`](https://github.com/gradio-app/trackio/commit/44fe9bb264fb2aafb0ec302ff15227c045819a2c) - Fix app file path when Trackio is not installed from source. Thanks @abidlabs!
74
+ - [#380](https://github.com/gradio-app/trackio/pull/380) [`c3f4cff`](https://github.com/gradio-app/trackio/commit/c3f4cff74bc5676e812773d8571454894fcdc7cc) - Add CLI commands for querying projects, runs, and metrics. Thanks @abidlabs!
75
+
76
+ ## 0.14.0
77
+
78
+ ### Features
79
+
80
+ - [#377](https://github.com/gradio-app/trackio/pull/377) [`5c5015b`](https://github.com/gradio-app/trackio/commit/5c5015b68c85c5de51111dad983f735c27b9a05f) - fixed wrapping issue in Runs table. Thanks @gaganchapa!
81
+ - [#374](https://github.com/gradio-app/trackio/pull/374) [`388e26b`](https://github.com/gradio-app/trackio/commit/388e26b9e9f24cd7ad203affe9b709be885b3d24) - Save Optimized Parquet files. Thanks @lhoestq!
82
+ - [#371](https://github.com/gradio-app/trackio/pull/371) [`fbace9c`](https://github.com/gradio-app/trackio/commit/fbace9cd7732c166f34d268f54b05bb06846cc5d) - Add GPU metrics logging. Thanks @kashif!
83
+ - [#367](https://github.com/gradio-app/trackio/pull/367) [`862840c`](https://github.com/gradio-app/trackio/commit/862840c13e30fc960cbee5b9eac4d3c25beba9de) - Add option to only show latest run, and fix the double logo issue. Thanks @abidlabs!
84
+
85
+ ## 0.13.1
86
+
87
+ ### Features
88
+
89
+ - [#369](https://github.com/gradio-app/trackio/pull/369) [`767e9fe`](https://github.com/gradio-app/trackio/commit/767e9fe095d7c6ed102016caf927c1517fb8618c) - tiny pr removing unnecessary code. Thanks @abidlabs!
90
+
91
+ ## 0.13.0
92
+
93
+ ### Features
94
+
95
+ - [#358](https://github.com/gradio-app/trackio/pull/358) [`073715d`](https://github.com/gradio-app/trackio/commit/073715d1caf8282f68890117f09c3ac301205312) - Improvements to `trackio.sync()`. Thanks @abidlabs!
96
+
97
+ ## 0.12.0
98
+
99
+ ### Features
100
+
101
+ - [#357](https://github.com/gradio-app/trackio/pull/357) [`02ba815`](https://github.com/gradio-app/trackio/commit/02ba815358060f1966052de051a5bdb09702920e) - Redesign media and tables to show up on separate page. Thanks @abidlabs!
102
+ - [#359](https://github.com/gradio-app/trackio/pull/359) [`08fe9c9`](https://github.com/gradio-app/trackio/commit/08fe9c9ddd7fe99ee811555fdfb62df9ab88e939) - docs: Improve docstrings. Thanks @qgallouedec!
103
+
104
+ ## 0.11.0
105
+
106
+ ### Features
107
+
108
+ - [#355](https://github.com/gradio-app/trackio/pull/355) [`ea51f49`](https://github.com/gradio-app/trackio/commit/ea51f4954922f21be76ef828700420fe9a912c4b) - Color code run checkboxes and match with plot lines. Thanks @abidlabs!
109
+ - [#353](https://github.com/gradio-app/trackio/pull/353) [`8abe691`](https://github.com/gradio-app/trackio/commit/8abe6919aeefe21fc7a23af814883efbb037c21f) - Remove show_api from demo.launch. Thanks @sergiopaniego!
110
+ - [#351](https://github.com/gradio-app/trackio/pull/351) [`8a8957e`](https://github.com/gradio-app/trackio/commit/8a8957e530dd7908d1fef7f2df030303f808101f) - Add `trackio.save()`. Thanks @abidlabs!
111
+
112
+ ## 0.10.0
113
+
114
+ ### Features
115
+
116
+ - [#305](https://github.com/gradio-app/trackio/pull/305) [`e64883a`](https://github.com/gradio-app/trackio/commit/e64883a51f7b8b93f7d48b8afe55acdb62238b71) - bump to gradio 6.0, make `trackio` compatible, and fix related issues. Thanks @abidlabs!
117
+
118
+ ## 0.9.1
119
+
120
+ ### Features
121
+
122
+ - [#344](https://github.com/gradio-app/trackio/pull/344) [`7e01024`](https://github.com/gradio-app/trackio/commit/7e010241d9a34794e0ce0dc19c1a6f0cf94ba856) - Avoid redundant calls to /whoami-v2. Thanks @Wauplin!
123
+
124
+ ## 0.9.0
125
+
126
+ ### Features
127
+
128
+ - [#343](https://github.com/gradio-app/trackio/pull/343) [`51bea30`](https://github.com/gradio-app/trackio/commit/51bea30f2877adff8e6497466d3a799400a0a049) - Sync offline projects to Hugging Face spaces. Thanks @candemircan!
129
+ - [#341](https://github.com/gradio-app/trackio/pull/341) [`4fd841f`](https://github.com/gradio-app/trackio/commit/4fd841fa190e15071b02f6fba7683ef4f393a654) - Adds a basic UI test to `trackio`. Thanks @abidlabs!
130
+ - [#339](https://github.com/gradio-app/trackio/pull/339) [`011d91b`](https://github.com/gradio-app/trackio/commit/011d91bb6ae266516fd250a349285670a8049d05) - Allow customizing the trackio color palette. Thanks @abidlabs!
131
+
132
+ ## 0.8.1
133
+
134
+ ### Features
135
+
136
+ - [#336](https://github.com/gradio-app/trackio/pull/336) [`5f9f51d`](https://github.com/gradio-app/trackio/commit/5f9f51dac8677f240d7c42c3e3b2660a22aee138) - Support a list of `Trackio.Image` in a `trackio.Table` cell. Thanks @abidlabs!
137
+
138
+ ## 0.8.0
139
+
140
+ ### Features
141
+
142
+ - [#331](https://github.com/gradio-app/trackio/pull/331) [`2c02d0f`](https://github.com/gradio-app/trackio/commit/2c02d0fd0a5824160528782402bb0dd4083396d5) - Truncate table string values that are greater than 250 characters (configurable via env variable). Thanks @abidlabs!
143
+ - [#324](https://github.com/gradio-app/trackio/pull/324) [`50b2122`](https://github.com/gradio-app/trackio/commit/50b2122e7965ac82a72e6cb3b7d048bc10a2a6b1) - Add log y-axis functionality to UI. Thanks @abidlabs!
144
+ - [#326](https://github.com/gradio-app/trackio/pull/326) [`61dc1f4`](https://github.com/gradio-app/trackio/commit/61dc1f40af2f545f8e70395ddf0dbb8aee6b60d5) - Fix: improve table rendering for metrics in Trackio Dashboard. Thanks @vigneshwaran!
145
+ - [#328](https://github.com/gradio-app/trackio/pull/328) [`6857cbb`](https://github.com/gradio-app/trackio/commit/6857cbbe557a59a4642f210ec42566d108294e63) - Support trackio.Table with trackio.Image columns. Thanks @abidlabs!
146
+ - [#323](https://github.com/gradio-app/trackio/pull/323) [`6857cbb`](https://github.com/gradio-app/trackio/commit/6857cbbe557a59a4642f210ec42566d108294e63) - add Trackio client implementations in Go, Rust, and JS. Thanks @vaibhav-research!
147
+
148
+ ## 0.7.0
149
+
150
+ ### Features
151
+
152
+ - [#277](https://github.com/gradio-app/trackio/pull/277) [`db35601`](https://github.com/gradio-app/trackio/commit/db35601b9c023423c4654c9909b8ab73e58737de) - fix: make grouped runs view reflect live updates. Thanks @Saba9!
153
+ - [#320](https://github.com/gradio-app/trackio/pull/320) [`24ae739`](https://github.com/gradio-app/trackio/commit/24ae73969b09fb3126acd2f91647cdfbf8cf72a1) - Add additional query params for xmin, xmax, and smoothing. Thanks @abidlabs!
154
+ - [#270](https://github.com/gradio-app/trackio/pull/270) [`cd1dfc3`](https://github.com/gradio-app/trackio/commit/cd1dfc3dc641b4499ac6d4a1b066fa8e2b52c57b) - feature: add support for logging audio. Thanks @Saba9!
155
+
156
+ ## 0.6.0
157
+
158
+ ### Features
159
+
160
+ - [#309](https://github.com/gradio-app/trackio/pull/309) [`1df2353`](https://github.com/gradio-app/trackio/commit/1df23534d6c01938c8db9c0f584ffa23e8d6021d) - Add histogram support with wandb-compatible API. Thanks @abidlabs!
161
+ - [#315](https://github.com/gradio-app/trackio/pull/315) [`76ba060`](https://github.com/gradio-app/trackio/commit/76ba06055dc43ca8f03b79f3e72d761949bd19a8) - Add guards to avoid silent fails. Thanks @Xmaster6y!
162
+ - [#313](https://github.com/gradio-app/trackio/pull/313) [`a606b3e`](https://github.com/gradio-app/trackio/commit/a606b3e1c5edf3d4cf9f31bd50605226a5a1c5d0) - No longer prevent certain keys from being used. Instead, dunderify them to prevent collisions with internal usage. Thanks @abidlabs!
163
+ - [#317](https://github.com/gradio-app/trackio/pull/317) [`27370a5`](https://github.com/gradio-app/trackio/commit/27370a595d0dbdf7eebbe7159d2ba778f039da44) - quick fixes for trackio.histogram. Thanks @abidlabs!
164
+ - [#312](https://github.com/gradio-app/trackio/pull/312) [`aa0f3bf`](https://github.com/gradio-app/trackio/commit/aa0f3bf372e7a0dd592a38af699c998363830eeb) - Fix video logging by adding TRACKIO_DIR to allowed_paths. Thanks @abidlabs!
165
+
166
+ ## 0.5.3
167
+
168
+ ### Features
169
+
170
+ - [#300](https://github.com/gradio-app/trackio/pull/300) [`5e4cacf`](https://github.com/gradio-app/trackio/commit/5e4cacf2e7ce527b4ce60de3a5bc05d2c02c77fb) - Adds more environment variables to allow customization of Trackio dashboard. Thanks @abidlabs!
171
+
172
+ ## 0.5.2
173
+
174
+ ### Features
175
+
176
+ - [#293](https://github.com/gradio-app/trackio/pull/293) [`64afc28`](https://github.com/gradio-app/trackio/commit/64afc28d3ea1dfd821472dc6bf0b8ed35a9b74be) - Ensures that the TRACKIO_DIR environment variable is respected. Thanks @abidlabs!
177
+ - [#287](https://github.com/gradio-app/trackio/pull/287) [`cd3e929`](https://github.com/gradio-app/trackio/commit/cd3e9294320949e6b8b829239069a43d5d7ff4c1) - fix(sqlite): unify .sqlite extension, allow export when DBs exist, clean WAL sidecars on import. Thanks @vaibhav-research!
178
+
179
+ ### Fixes
180
+
181
+ - [#291](https://github.com/gradio-app/trackio/pull/291) [`3b5adc3`](https://github.com/gradio-app/trackio/commit/3b5adc3d1f452dbab7a714d235f4974782f93730) - Fix the wheel build. Thanks @pngwn!
182
+
183
+ ## 0.5.1
184
+
185
+ ### Fixes
186
+
187
+ - [#278](https://github.com/gradio-app/trackio/pull/278) [`314c054`](https://github.com/gradio-app/trackio/commit/314c05438007ddfea3383e06fd19143e27468e2d) - Fix row orientation of metrics plots. Thanks @abidlabs!
trackio/__init__.py ADDED
@@ -0,0 +1,713 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import atexit
2
+ import glob
3
+ import json
4
+ import logging
5
+ import os
6
+ import shutil
7
+ import warnings
8
+ import webbrowser
9
+ from pathlib import Path
10
+ from typing import Any
11
+
12
+ import huggingface_hub
13
+ from gradio.themes import ThemeClass
14
+ from gradio.utils import TupleNoPrint
15
+ from gradio_client import Client, handle_file
16
+ from huggingface_hub import SpaceStorage
17
+ from huggingface_hub.errors import LocalTokenNotFoundError
18
+
19
+ from trackio import context_vars, deploy, utils
20
+ from trackio.alerts import AlertLevel
21
+ from trackio.api import Api
22
+ from trackio.apple_gpu import apple_gpu_available
23
+ from trackio.apple_gpu import log_apple_gpu as _log_apple_gpu
24
+ from trackio.deploy import sync
25
+ from trackio.frontend_server import mount_frontend
26
+ from trackio.gpu import gpu_available
27
+ from trackio.gpu import log_gpu as _log_nvidia_gpu
28
+ from trackio.histogram import Histogram
29
+ from trackio.imports import import_csv, import_tf_events
30
+ from trackio.markdown import Markdown
31
+ from trackio.media import (
32
+ TrackioAudio,
33
+ TrackioImage,
34
+ TrackioVideo,
35
+ get_project_media_path,
36
+ )
37
+ from trackio.run import Run
38
+ from trackio.server import make_trackio_server
39
+ from trackio.sqlite_storage import SQLiteStorage
40
+ from trackio.table import Table
41
+ from trackio.typehints import UploadEntry
42
+ from trackio.utils import TRACKIO_DIR, TRACKIO_LOGO_DIR
43
+
44
# Quiet httpx's per-request INFO logging, which would otherwise flood the console.
logging.getLogger("httpx").setLevel(logging.WARNING)

# Suppress a known, harmless gradio warning about OAuth-less sessions.
warnings.filterwarnings(
    "ignore",
    message="Empty session being created. Install gradio\\[oauth\\]",
    category=UserWarning,
    module="gradio.helpers",
)

# The package version is the single source of truth in package.json.
_package_metadata = json.loads(
    Path(__file__).parent.joinpath("package.json").read_text()
)
__version__ = _package_metadata["version"]

# Public API of the trackio package.
__all__ = [
    "init",
    "log",
    "log_system",
    "log_gpu",
    "log_apple_gpu",
    "finish",
    "alert",
    "AlertLevel",
    "show",
    "sync",
    "delete_project",
    "import_csv",
    "import_tf_events",
    "save",
    "Image",
    "Video",
    "Audio",
    "Table",
    "Histogram",
    "Markdown",
    "Api",
]

# wandb-style short aliases for the media wrapper classes.
Image = TrackioImage
Video = TrackioVideo
Audio = TrackioAudio


# Module-level run config; rebound to the active run's config by init().
config = {}

# Tracks whether the atexit cleanup hook has been installed (install-once flag).
_atexit_registered = False
+
90
+
91
def _cleanup_current_run():
    """Best-effort finish of the active run; registered as an atexit hook."""
    active = context_vars.current_run.get()
    if active is None:
        return
    try:
        active.finish()
    except Exception:
        # Interpreter-shutdown path: cleanup must never raise.
        pass
98
+
99
+
100
def init(
    project: str,
    name: str | None = None,
    group: str | None = None,
    space_id: str | None = None,
    space_storage: SpaceStorage | None = None,
    dataset_id: str | None = None,
    config: dict | None = None,
    resume: str = "never",
    settings: Any = None,
    private: bool | None = None,
    embed: bool = True,
    auto_log_gpu: bool | None = None,
    gpu_log_interval: float = 10.0,
    webhook_url: str | None = None,
    webhook_min_level: AlertLevel | str | None = None,
) -> Run:
    """
    Creates a new Trackio project and returns a [`Run`] object.

    Args:
        project (`str`):
            The name of the project (can be an existing project to continue tracking or
            a new project to start tracking from scratch).
        name (`str`, *optional*):
            The name of the run (if not provided, a default name will be generated).
        group (`str`, *optional*):
            The name of the group which this run belongs to in order to help organize
            related runs together. You can toggle the entire group's visibility in the
            dashboard.
        space_id (`str`, *optional*):
            If provided, the project will be logged to a Hugging Face Space instead of
            a local directory. Should be a complete Space name like
            `"username/reponame"` or `"orgname/reponame"`, or just `"reponame"` in which
            case the Space will be created in the currently-logged-in Hugging Face
            user's namespace. If the Space does not exist, it will be created. If the
            Space already exists, the project will be logged to it.
        space_storage ([`~huggingface_hub.SpaceStorage`], *optional*):
            Choice of persistent storage tier.
        dataset_id (`str`, *optional*):
            If a `space_id` is provided, a persistent Hugging Face Dataset will be
            created and the metrics will be synced to it every 5 minutes. Specify a
            Dataset with name like `"username/datasetname"` or `"orgname/datasetname"`,
            or `"datasetname"` (uses currently-logged-in Hugging Face user's namespace),
            or `None` (uses the same name as the Space but with the `"_dataset"`
            suffix). If the Dataset does not exist, it will be created. If the Dataset
            already exists, the project will be appended to it.
        config (`dict`, *optional*):
            A dictionary of configuration options. Provided for compatibility with
            `wandb.init()`.
        resume (`str`, *optional*, defaults to `"never"`):
            Controls how to handle resuming a run. Can be one of:

            - `"must"`: Must resume the run with the given name, raises error if run
              doesn't exist
            - `"allow"`: Resume the run if it exists, otherwise create a new run
            - `"never"`: Never resume a run, always create a new one
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
        settings (`Any`, *optional*):
            Not used. Provided for compatibility with `wandb.init()`.
        embed (`bool`, *optional*, defaults to `True`):
            If running inside a Jupyter/Colab notebook, whether the dashboard should
            automatically be embedded in the cell when trackio.init() is called. For
            local runs, this launches a local Gradio app and embeds it. For Space runs,
            this embeds the Space URL. In Colab, the local dashboard will be accessible
            via a public share URL (default Gradio behavior).
        auto_log_gpu (`bool` or `None`, *optional*, defaults to `None`):
            Controls automatic GPU metrics logging. If `None` (default), GPU logging
            is automatically enabled when `nvidia-ml-py` is installed and an NVIDIA
            GPU or Apple M series is detected. Set to `True` to force enable or
            `False` to disable.
        gpu_log_interval (`float`, *optional*, defaults to `10.0`):
            The interval in seconds between automatic GPU metric logs.
            Only used when `auto_log_gpu=True`.
        webhook_url (`str`, *optional*):
            A webhook URL to POST alert payloads to when `trackio.alert()` is
            called. Supports Slack and Discord webhook URLs natively (payloads
            are formatted automatically). Can also be set via the
            `TRACKIO_WEBHOOK_URL` environment variable. Individual alerts can
            override this URL by passing `webhook_url` to `trackio.alert()`.
        webhook_min_level (`AlertLevel` or `str`, *optional*):
            Minimum alert level that should trigger webhook delivery.
            For example, `AlertLevel.WARN` sends only `WARN` and `ERROR`
            alerts to the webhook destination. Can also be set via
            `TRACKIO_WEBHOOK_MIN_LEVEL`.
    Returns:
        `Run`: A [`Run`] object that can be used to log metrics and finish the run.
    """
    if settings is not None:
        warnings.warn(
            "* Warning: settings is not used. Provided for compatibility with wandb.init(). Please create an issue at: https://github.com/gradio-app/trackio/issues if you need a specific feature implemented."
        )

    if space_id is None and dataset_id is not None:
        raise ValueError("Must provide a `space_id` when `dataset_id` is provided.")
    try:
        space_id, dataset_id = utils.preprocess_space_and_dataset_ids(
            space_id, dataset_id
        )
    except LocalTokenNotFoundError as e:
        raise LocalTokenNotFoundError(
            f"You must be logged in to Hugging Face locally when `space_id` is provided to deploy to a Space. {e}"
        ) from e

    url = context_vars.current_server.get()

    if space_id is not None:
        if url is None:
            url = space_id
            context_vars.current_server.set(url)
        context_vars.current_space_id.set(space_id)

    _should_embed_local = False

    # Only announce the project (and set up its destination) when switching to
    # a different project than the one currently active.
    if (
        context_vars.current_project.get() is None
        or context_vars.current_project.get() != project
    ):
        print(f"* Trackio project initialized: {project}")

        if dataset_id is not None:
            os.environ["TRACKIO_DATASET_ID"] = dataset_id
            print(
                f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}"
            )
        if space_id is None:
            print(f"* Trackio metrics logged to: {TRACKIO_DIR}")
            _should_embed_local = embed and utils.is_in_notebook()
            if not _should_embed_local:
                utils.print_dashboard_instructions(project)
        else:
            deploy.create_space_if_not_exists(
                space_id, space_storage, dataset_id, private
            )
            user_name, space_name = space_id.split("/")
            space_url = deploy.SPACE_HOST_URL.format(
                user_name=user_name, space_name=space_name
            )
            print(f"* View dashboard by going to: {space_url}")
            if utils.is_in_notebook() and embed:
                utils.embed_url_in_notebook(space_url)
    context_vars.current_project.set(project)

    # Resolve the resume policy into a concrete `resumed` flag (and possibly
    # discard a conflicting `name` under resume="never").
    if resume == "must":
        if name is None:
            raise ValueError("Must provide a run name when resume='must'")
        if name not in SQLiteStorage.get_runs(project):
            raise ValueError(f"Run '{name}' does not exist in project '{project}'")
        resumed = True
    elif resume == "allow":
        resumed = name is not None and name in SQLiteStorage.get_runs(project)
    elif resume == "never":
        if name is not None and name in SQLiteStorage.get_runs(project):
            # Fixed grammar of the user-facing warning ("a new name and instead").
            warnings.warn(
                f"* Warning: resume='never' but a run '{name}' already exists in "
                f"project '{project}'. Generating a new name instead. If you want "
                "to resume this run, call init() with resume='must' or resume='allow'."
            )
            name = None
        resumed = False
    else:
        raise ValueError("resume must be one of: 'must', 'allow', or 'never'")

    # Auto-detect GPU logging when the caller did not decide explicitly.
    if auto_log_gpu is None:
        nvidia_available = gpu_available()
        apple_available = apple_gpu_available()
        auto_log_gpu = nvidia_available or apple_available
        if nvidia_available:
            print("* NVIDIA GPU detected, enabling automatic GPU metrics logging")
        elif apple_available:
            print("* Apple Silicon detected, enabling automatic system metrics logging")

    run = Run(
        url=url,
        project=project,
        client=None,
        name=name,
        group=group,
        config=config,
        space_id=space_id,
        auto_log_gpu=auto_log_gpu,
        gpu_log_interval=gpu_log_interval,
        webhook_url=webhook_url,
        webhook_min_level=webhook_min_level,
    )

    if space_id is not None:
        SQLiteStorage.set_project_metadata(project, "space_id", space_id)
        # Data logged while offline still needs to be pushed to the Space.
        if SQLiteStorage.has_pending_data(project):
            run._has_local_buffer = True

    # Install the atexit cleanup hook exactly once per process.
    global _atexit_registered
    if not _atexit_registered:
        atexit.register(_cleanup_current_run)
        _atexit_registered = True

    if resumed:
        print(f"* Resumed existing run: {run.name}")
    else:
        print(f"* Created new run: {run.name}")

    context_vars.current_run.set(run)
    # Expose the run's config as the module-level `trackio.config` (wandb parity).
    globals()["config"] = run.config

    if _should_embed_local:
        show(project=project, open_browser=False, block_thread=False)

    return run
311
+
312
+
313
def log(metrics: dict, step: int | None = None) -> None:
    """
    Logs metrics to the current run.

    Args:
        metrics (`dict`):
            A dictionary of metrics to log.
        step (`int`, *optional*):
            The step number. If not provided, the step will be incremented
            automatically.
    """
    active_run = context_vars.current_run.get()
    if active_run is None:
        raise RuntimeError("Call trackio.init() before trackio.log().")
    active_run.log(metrics=metrics, step=step)
330
+ )
331
+
332
+
333
def log_system(metrics: dict) -> None:
    """
    Logs system metrics (GPU, etc.) to the current run using timestamps instead of steps.

    Args:
        metrics (`dict`):
            A dictionary of system metrics to log.
    """
    active_run = context_vars.current_run.get()
    if active_run is None:
        raise RuntimeError("Call trackio.init() before trackio.log_system().")
    active_run.log_system(metrics=metrics)
345
+
346
+
347
def log_gpu(run: Run | None = None, device: int | None = None) -> dict:
    """
    Log GPU metrics to the current or specified run as system metrics.
    Automatically detects whether an NVIDIA or Apple GPU is available and calls
    the appropriate logging method.

    Args:
        run: Optional Run instance. If None, uses current run from context.
        device: CUDA device index to collect metrics from (NVIDIA GPUs only).
            If None, collects from all GPUs visible to this process.
            This parameter is ignored for Apple GPUs.

    Returns:
        dict: The GPU metrics that were logged.

    Example:
        ```python
        import trackio

        run = trackio.init(project="my-project")
        trackio.log({"loss": 0.5})
        trackio.log_gpu()
        trackio.log_gpu(device=0)
        ```
    """
    target = run if run is not None else context_vars.current_run.get()
    if target is None:
        raise RuntimeError("Call trackio.init() before trackio.log_gpu().")

    # NVIDIA takes precedence; fall back to Apple Silicon, then warn.
    if gpu_available():
        return _log_nvidia_gpu(run=target, device=device)
    if apple_gpu_available():
        return _log_apple_gpu(run=target)
    warnings.warn(
        "No GPU detected. Install nvidia-ml-py for NVIDIA GPU support "
        "or psutil for Apple Silicon support."
    )
    return {}
387
+
388
+
389
def finish():
    """
    Finishes the current run.
    """
    active_run = context_vars.current_run.get()
    if active_run is None:
        raise RuntimeError("Call trackio.init() before trackio.finish().")
    active_run.finish()
397
+
398
+
399
def alert(
    title: str,
    text: str | None = None,
    level: AlertLevel = AlertLevel.WARN,
    webhook_url: str | None = None,
) -> None:
    """
    Fires an alert immediately on the current run. The alert is printed to the
    terminal, stored in the database, and displayed in the dashboard. If a
    webhook URL is configured (via `trackio.init()`, the `TRACKIO_WEBHOOK_URL`
    environment variable, or the `webhook_url` parameter here), the alert is
    also POSTed to that URL.

    Args:
        title (`str`):
            A short title for the alert.
        text (`str`, *optional*):
            A longer description with details about the alert.
        level (`AlertLevel`, *optional*, defaults to `AlertLevel.WARN`):
            The severity level. One of `AlertLevel.INFO`, `AlertLevel.WARN`,
            or `AlertLevel.ERROR`.
        webhook_url (`str`, *optional*):
            A webhook URL to send this specific alert to. Overrides any
            URL set in `trackio.init()` or the `TRACKIO_WEBHOOK_URL`
            environment variable. Supports Slack and Discord webhook
            URLs natively.
    """
    active_run = context_vars.current_run.get()
    if active_run is None:
        raise RuntimeError("Call trackio.init() before trackio.alert().")
    active_run.alert(title=title, text=text, level=level, webhook_url=webhook_url)
430
+
431
+
432
def delete_project(project: str, force: bool = False) -> bool:
    """
    Deletes a project by removing its local SQLite database.

    Args:
        project (`str`):
            The name of the project to delete.
        force (`bool`, *optional*, defaults to `False`):
            If `True`, deletes the project without prompting for confirmation.
            If `False`, prompts the user to confirm before deleting.

    Returns:
        `bool`: `True` if the project was deleted, `False` otherwise.
    """
    db_path = SQLiteStorage.get_project_db_path(project)

    if not db_path.exists():
        print(f"* Project '{project}' does not exist.")
        return False

    if not force:
        answer = input(
            f"Are you sure you want to delete project '{project}'? "
            f"This will permanently delete all runs and metrics. (y/N): "
        )
        if answer.lower() not in ("y", "yes"):
            print("* Deletion cancelled.")
            return False

    try:
        db_path.unlink()

        # SQLite in WAL mode may leave -wal/-shm sidecar files next to the db.
        for ext in ("-wal", "-shm"):
            sidecar = Path(f"{db_path}{ext}")
            if sidecar.exists():
                sidecar.unlink()

        print(f"* Project '{project}' has been deleted.")
        return True
    except Exception as e:
        print(f"* Error deleting project '{project}': {e}")
        return False
474
+
475
+
476
def save(
    glob_str: str | Path,
    project: str | None = None,
) -> str:
    """
    Saves files to a project (not linked to a specific run). If Trackio is running
    locally, the file(s) will be copied to the project's files directory. If Trackio is
    running in a Space, the file(s) will be uploaded to the Space's files directory.

    Args:
        glob_str (`str` or `Path`):
            The file path or glob pattern to save. Can be a single file or a pattern
            matching multiple files (e.g., `"*.py"`, `"models/**/*.pth"`).
        project (`str`, *optional*):
            The name of the project to save files to. If not provided, uses the current
            project from `trackio.init()`. If no project is initialized, raises an
            error.

    Returns:
        `str`: The path where the file(s) were saved (project's files directory).

    Example:
        ```python
        import trackio

        trackio.init(project="my-project")
        trackio.save("config.yaml")
        trackio.save("models/*.pth")
        ```
    """
    # Fall back to the project set by trackio.init(); error if neither is given.
    if project is None:
        project = context_vars.current_project.get()
        if project is None:
            raise RuntimeError(
                "No project specified. Either call trackio.init() first or provide a "
                "project parameter to trackio.save()."
            )

    glob_str = Path(glob_str)
    base_path = Path.cwd().resolve()

    # A path that names an existing file is taken as-is; anything else is
    # treated as a (possibly recursive) glob pattern rooted at the CWD.
    matched_files = []
    if glob_str.is_file():
        matched_files = [glob_str.resolve()]
    else:
        pattern = str(glob_str)
        if not glob_str.is_absolute():
            pattern = str((Path.cwd() / glob_str).resolve())
        matched_files = [
            Path(f).resolve()
            for f in glob.glob(pattern, recursive=True)
            if Path(f).is_file()
        ]

    if not matched_files:
        raise ValueError(f"No files found matching pattern: {glob_str}")

    # Local mode: copy files into the project's media directory.
    # Space mode: upload them through the dashboard's Gradio API instead.
    current_run = context_vars.current_run.get()
    is_local = (
        current_run._is_local
        if current_run is not None
        else (context_vars.current_space_id.get() is None)
    )

    if is_local:
        for file_path in matched_files:
            # Preserve the path relative to the CWD; files outside the CWD
            # are flattened to just their basename.
            try:
                relative_to_base = file_path.relative_to(base_path)
            except ValueError:
                relative_to_base = Path(file_path.name)

            if current_run is not None:
                # An active run batches uploads through its own queue.
                current_run._queue_upload(
                    file_path,
                    step=None,
                    relative_path=str(relative_to_base.parent),
                    use_run_name=False,
                )
            else:
                # No active run: copy directly into the project media path.
                media_path = get_project_media_path(
                    project=project,
                    run=None,
                    step=None,
                    relative_path=str(relative_to_base),
                )
                shutil.copy(str(file_path), str(media_path))
    else:
        url = context_vars.current_server.get()

        upload_entries = []
        for file_path in matched_files:
            try:
                relative_to_base = file_path.relative_to(base_path)
            except ValueError:
                relative_to_base = Path(file_path.name)

            if current_run is not None:
                # Active run: reuse its upload queue even in Space mode.
                current_run._queue_upload(
                    file_path,
                    step=None,
                    relative_path=str(relative_to_base.parent),
                    use_run_name=False,
                )
            else:
                # Collect entries for a single bulk upload call below.
                upload_entry: UploadEntry = {
                    "project": project,
                    "run": None,
                    "step": None,
                    "relative_path": str(relative_to_base),
                    "uploaded_file": handle_file(file_path),
                }
                upload_entries.append(upload_entry)

        if upload_entries:
            if url is None:
                raise RuntimeError(
                    "No server available. Call trackio.init() before trackio.save() to start the server."
                )

            # Upload failures are non-fatal: warn and continue so training
            # code is never interrupted by a flaky dashboard connection.
            try:
                client = Client(url, verbose=False, httpx_kwargs={"timeout": 90})
                client.predict(
                    api_name="/bulk_upload_media",
                    uploads=upload_entries,
                    hf_token=huggingface_hub.utils.get_token(),
                )
            except Exception as e:
                warnings.warn(
                    f"Failed to upload files: {e}. "
                    "Files may not be available in the dashboard."
                )

    return str(utils.MEDIA_DIR / project / "files")
609
+
610
+
611
def show(
    project: str | None = None,
    *,
    theme: str | ThemeClass | None = None,
    mcp_server: bool | None = None,
    footer: bool = True,
    color_palette: list[str] | None = None,
    open_browser: bool = True,
    block_thread: bool | None = None,
    host: str | None = None,
):
    """
    Launches the Trackio dashboard.

    Args:
        project (`str`, *optional*):
            The name of the project whose runs to show. If not provided, all projects
            will be shown and the user can select one.
        theme (`str` or `ThemeClass`, *optional*):
            A Gradio Theme to use for the dashboard instead of the default Gradio theme,
            can be a built-in theme (e.g. `'soft'`, `'citrus'`), a theme from the Hub
            (e.g. `"gstaff/xkcd"`), or a custom Theme class. If not provided, the
            `TRACKIO_THEME` environment variable will be used, or if that is not set,
            the default Gradio theme will be used.
        mcp_server (`bool`, *optional*):
            If `True`, the Trackio dashboard will be set up as an MCP server and certain
            functions will be added as MCP tools. If `None` (default behavior), then the
            `GRADIO_MCP_SERVER` environment variable will be used to determine if the
            MCP server should be enabled (which is `"True"` on Hugging Face Spaces).
        footer (`bool`, *optional*, defaults to `True`):
            Whether to show the Gradio footer. When `False`, the footer will be hidden.
            This can also be controlled via the `footer` query parameter in the URL.
        color_palette (`list[str]`, *optional*):
            A list of hex color codes to use for plot lines. If not provided, the
            `TRACKIO_COLOR_PALETTE` environment variable will be used (comma-separated
            hex codes), or if that is not set, the default color palette will be used.
            Example: `['#FF0000', '#00FF00', '#0000FF']`
        open_browser (`bool`, *optional*, defaults to `True`):
            If `True` and not in a notebook, a new browser tab will be opened with the
            dashboard. If `False`, the browser will not be opened.
        block_thread (`bool`, *optional*):
            If `True`, the main thread will be blocked until the dashboard is closed.
            If `None` (default behavior), then the main thread will not be blocked if the
            dashboard is launched in a notebook, otherwise the main thread will be blocked.
        host (`str`, *optional*):
            The host to bind the server to. If not provided, defaults to `'127.0.0.1'`
            (localhost only). Set to `'0.0.0.0'` to allow remote access.

    Returns:
        `app`: The Gradio app object corresponding to the dashboard launched by Trackio.
        `url`: The local URL of the dashboard.
        `share_url`: The public share URL of the dashboard.
        `full_url`: The full URL of the dashboard including the write token (will use the public share URL if launched publicly, otherwise the local URL).
    """
    # The palette is passed to the dashboard process via an environment variable.
    if color_palette is not None:
        os.environ["TRACKIO_COLOR_PALETTE"] = ",".join(color_palette)

    theme = theme or os.environ.get("TRACKIO_THEME")

    # Explicit argument wins; otherwise defer to the GRADIO_MCP_SERVER env var
    # (set to "True" on Hugging Face Spaces).
    _mcp_server = (
        mcp_server
        if mcp_server is not None
        else os.environ.get("GRADIO_MCP_SERVER", "False") == "True"
    )

    server = make_trackio_server()
    mount_frontend(server)

    # prevent_thread_lock lets us decide below whether to block ourselves.
    _, url, share_url = server.launch(
        quiet=True,
        inline=False,
        prevent_thread_lock=True,
        favicon_path=TRACKIO_LOGO_DIR / "trackio_logo_light.png",
        allowed_paths=[TRACKIO_LOGO_DIR, TRACKIO_DIR],
        mcp_server=_mcp_server,
        theme=theme,
        server_name=host,
    )

    # Prefer the public share URL when one exists; normalize to a single
    # trailing slash before appending the optional project query parameter.
    base_url = share_url + "/" if share_url else url
    dashboard_url = base_url.rstrip("/") + "/"
    if project:
        dashboard_url += f"?project={project}"
    full_url = utils.get_full_url(
        base_url.rstrip("/"),
        project=project,
        write_token=server.write_token,
        footer=footer,
    )

    # In a terminal: print URLs, optionally open a browser, and block by default.
    # In a notebook: embed the dashboard inline and do not block by default.
    if not utils.is_in_notebook():
        print(f"* Trackio UI launched at: {dashboard_url}")
        print(f"* Gradio API available at: {base_url}")
        if open_browser:
            webbrowser.open(dashboard_url)
        block_thread = block_thread if block_thread is not None else True
    else:
        utils.embed_url_in_notebook(dashboard_url)
        block_thread = block_thread if block_thread is not None else False

    if block_thread:
        utils.block_main_thread_until_keyboard_interrupt()
    return TupleNoPrint((server, url, share_url, full_url))
trackio/alerts.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import logging
import ssl
import urllib.error
import urllib.request
from enum import Enum

# Prefer certifi's CA bundle when available so HTTPS webhook requests succeed
# in environments where the system certificate store is missing or stale.
try:
    import certifi

    _SSL_CONTEXT = ssl.create_default_context(cafile=certifi.where())
except ImportError:
    # certifi not installed: None makes urlopen fall back to default SSL handling.
    _SSL_CONTEXT = None

logger = logging.getLogger(__name__)
16
+
17
+
18
class AlertLevel(str, Enum):
    # Alert severity levels. The str mixin makes values compare equal to their
    # string form and keeps them JSON-serializable in webhook payloads.
    INFO = "info"
    WARN = "warn"
    ERROR = "error"
22
+
23
+
24
# Numeric ranking used to compare severities (higher = more severe); see
# should_send_webhook().
ALERT_LEVEL_ORDER = {
    AlertLevel.INFO: 0,
    AlertLevel.WARN: 1,
    AlertLevel.ERROR: 2,
}

# ANSI escape codes used to colorize alert lines printed to the terminal.
ALERT_COLORS = {
    AlertLevel.INFO: "\033[94m",
    AlertLevel.WARN: "\033[93m",
    AlertLevel.ERROR: "\033[91m",
}
RESET_COLOR = "\033[0m"

# Emoji prefixes used in Slack/Discord webhook payloads.
LEVEL_EMOJI = {
    AlertLevel.INFO: "ℹ️",
    AlertLevel.WARN: "⚠️",
    AlertLevel.ERROR: "🚨",
}
42
+
43
+
44
def format_alert_terminal(
    level: AlertLevel, title: str, text: str | None, step: int | None
) -> str:
    """Render an alert as a single ANSI-colored line for terminal output."""
    prefix = f"{ALERT_COLORS.get(level, '')}[TRACKIO {level.value.upper()}]{RESET_COLOR}"
    suffix = "" if step is None else f" (step {step})"
    body = f"{title}: {text}" if text else title
    return f"{prefix} {body}{suffix}"
52
+
53
+
54
+ def _is_slack_url(url: str) -> bool:
55
+ return "hooks.slack.com" in url
56
+
57
+
58
+ def _is_discord_url(url: str) -> bool:
59
+ return "discord.com/api/webhooks" in url or "discordapp.com/api/webhooks" in url
60
+
61
+
62
def _build_slack_payload(
    level: AlertLevel,
    title: str,
    text: str | None,
    project: str,
    run: str,
    step: int | None,
) -> dict:
    """Build a Slack Block Kit payload for an alert webhook."""
    step_suffix = "" if step is None else f" • Step {step}"
    header_block = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": f"{LEVEL_EMOJI.get(level, '')} *[{level.value.upper()}] {title}*",
        },
    }
    context_block = {
        "type": "context",
        "elements": [
            {
                "type": "mrkdwn",
                "text": f"Project: {project} • Run: {run}{step_suffix}",
            }
        ],
    }
    blocks = [header_block]
    if text:
        # Optional detail text goes between the header and the context footer.
        blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": text}})
    blocks.append(context_block)
    return {"blocks": blocks}
83
+
84
+
85
def _build_discord_payload(
    level: AlertLevel,
    title: str,
    text: str | None,
    project: str,
    run: str,
    step: int | None,
) -> dict:
    """Build a Discord embed payload for an alert webhook."""
    # Embed colors as decimal RGB: blue / yellow / red per severity.
    color_map = {
        AlertLevel.INFO: 3447003,
        AlertLevel.WARN: 16776960,
        AlertLevel.ERROR: 15158332,
    }
    step_suffix = "" if step is None else f" • Step {step}"
    embed = {
        "title": f"{LEVEL_EMOJI.get(level, '')} [{level.value.upper()}] {title}",
        "color": color_map.get(level, 0),
        "footer": {"text": f"Project: {project} • Run: {run}{step_suffix}"},
    }
    if text:
        embed["description"] = text
    return {"embeds": [embed]}
108
+
109
+
110
def _build_generic_payload(
    level: AlertLevel,
    title: str,
    text: str | None,
    project: str,
    run: str,
    step: int | None,
    timestamp: str | None,
) -> dict:
    """Build a plain JSON payload for webhooks that are not Slack or Discord."""
    return dict(
        level=level.value,
        title=title,
        text=text,
        project=project,
        run=run,
        step=step,
        timestamp=timestamp,
    )
128
+
129
+
130
def parse_alert_level(level: AlertLevel | str) -> AlertLevel:
    """Coerce a string or AlertLevel into an AlertLevel, raising on bad input."""
    if isinstance(level, AlertLevel):
        return level
    try:
        return AlertLevel(level.lower().strip())
    except ValueError as e:
        allowed = ", ".join(lvl.value for lvl in AlertLevel)
        raise ValueError(
            f"Invalid alert level '{level}'. Expected one of: {allowed}."
        ) from e
141
+
142
+
143
def resolve_webhook_min_level(
    webhook_min_level: AlertLevel | str | None,
) -> AlertLevel | None:
    """Parse an optional minimum webhook level; None passes through unchanged."""
    return None if webhook_min_level is None else parse_alert_level(webhook_min_level)
149
+
150
+
151
def should_send_webhook(
    level: AlertLevel, webhook_min_level: AlertLevel | None
) -> bool:
    """Return True when *level* meets or exceeds the configured minimum."""
    if webhook_min_level is None:
        # No threshold configured: every alert is forwarded.
        return True
    threshold = ALERT_LEVEL_ORDER[webhook_min_level]
    return ALERT_LEVEL_ORDER[level] >= threshold
157
+
158
+
159
def send_webhook(
    url: str,
    level: AlertLevel,
    title: str,
    text: str | None,
    project: str,
    run: str,
    step: int | None,
    timestamp: str | None = None,
) -> None:
    """
    POST an alert to a webhook URL, formatting the payload for the target.

    Slack and Discord URLs receive their native rich payloads; any other URL
    receives a generic JSON object. Failures are logged as warnings, never
    raised, so alerting cannot interrupt training code.

    Args:
        url: The webhook endpoint to POST to.
        level: Severity of the alert.
        title: Short alert title.
        text: Optional longer description.
        project: Project name the alert belongs to.
        run: Run name the alert was fired on.
        step: Optional step at which the alert fired.
        timestamp: Optional timestamp string (generic payloads only).
    """
    if _is_slack_url(url):
        payload = _build_slack_payload(level, title, text, project, run, step)
    elif _is_discord_url(url):
        payload = _build_discord_payload(level, title, text, project, run, step)
    else:
        payload = _build_generic_payload(
            level, title, text, project, run, step, timestamp
        )

    data = json.dumps(payload).encode("utf-8")
    req = urllib.request.Request(
        url, data=data, headers={"Content-Type": "application/json"}
    )
    try:
        # Fix: close the response explicitly. The previous code discarded the
        # object returned by urlopen(), leaking the underlying HTTP connection.
        with urllib.request.urlopen(req, timeout=10, context=_SSL_CONTEXT):
            pass
    except Exception as e:
        logger.warning(f"Failed to send webhook to {url}: {e}")
trackio/api.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Iterator
2
+
3
+ from trackio.sqlite_storage import SQLiteStorage
4
+
5
+
6
+ class Run:
7
+ def __init__(self, project: str, name: str):
8
+ self.project = project
9
+ self.name = name
10
+ self._config = None
11
+
12
+ @property
13
+ def id(self) -> str:
14
+ return self.name
15
+
16
+ @property
17
+ def config(self) -> dict | None:
18
+ if self._config is None:
19
+ self._config = SQLiteStorage.get_run_config(self.project, self.name)
20
+ return self._config
21
+
22
+ def alerts(self, level: str | None = None, since: str | None = None) -> list[dict]:
23
+ return SQLiteStorage.get_alerts(
24
+ self.project, run_name=self.name, level=level, since=since
25
+ )
26
+
27
+ def delete(self) -> bool:
28
+ return SQLiteStorage.delete_run(self.project, self.name)
29
+
30
+ def move(self, new_project: str) -> bool:
31
+ success = SQLiteStorage.move_run(self.project, self.name, new_project)
32
+ if success:
33
+ self.project = new_project
34
+ return success
35
+
36
+ def rename(self, new_name: str) -> "Run":
37
+ SQLiteStorage.rename_run(self.project, self.name, new_name)
38
+ self.name = new_name
39
+ return self
40
+
41
+ def __repr__(self) -> str:
42
+ return f"<Run {self.name} in project {self.project}>"
43
+
44
+
45
class Runs:
    """Lazily loaded collection of Run handles for a project."""

    def __init__(self, project: str):
        self.project = project
        self._runs = None  # populated on first container operation

    def _load_runs(self):
        # Fetch the run names once; subsequent calls are no-ops.
        if self._runs is None:
            self._runs = [
                Run(self.project, run_name)
                for run_name in SQLiteStorage.get_runs(self.project)
            ]

    def __iter__(self) -> Iterator[Run]:
        self._load_runs()
        return iter(self._runs)

    def __getitem__(self, index: int) -> Run:
        self._load_runs()
        return self._runs[index]

    def __len__(self) -> int:
        self._load_runs()
        return len(self._runs)

    def __repr__(self) -> str:
        self._load_runs()
        return f"<Runs project={self.project} count={len(self._runs)}>"
70
+
71
+
72
class Api:
    """Entry point for querying locally stored Trackio projects."""

    def runs(self, project: str) -> Runs:
        """Return the run collection for *project*; raises if it does not exist."""
        if not SQLiteStorage.get_project_db_path(project).exists():
            raise ValueError(f"Project '{project}' does not exist")
        return Runs(project)

    def alerts(
        self,
        project: str,
        run: str | None = None,
        level: str | None = None,
        since: str | None = None,
    ) -> list[dict]:
        """Return alerts for *project*, optionally filtered by run, level, and time."""
        if not SQLiteStorage.get_project_db_path(project).exists():
            raise ValueError(f"Project '{project}' does not exist")
        return SQLiteStorage.get_alerts(project, run_name=run, level=level, since=since)
trackio/apple_gpu.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import platform
import subprocess
import sys
import threading
import warnings
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Only imported for type checkers, avoiding a runtime circular import.
    from trackio.run import Run

# psutil is imported lazily via _ensure_psutil() so trackio works without it;
# these module globals cache the imported module and its availability.
psutil: Any = None
PSUTIL_AVAILABLE = False
_monitor_lock = threading.Lock()
15
+
16
def _ensure_psutil():
    """Import psutil on demand, caching it in module globals.

    Raises ImportError with install guidance when psutil is unavailable.
    """
    global PSUTIL_AVAILABLE, psutil
    if not PSUTIL_AVAILABLE:
        try:
            import psutil as _psutil
        except ImportError:
            raise ImportError(
                "psutil is required for Apple Silicon monitoring. "
                "Install it with: pip install psutil"
            )
        psutil = _psutil
        PSUTIL_AVAILABLE = True
    return psutil
31
+
32
+
33
def is_apple_silicon() -> bool:
    """Check if running on Apple Silicon (M1/M2/M3/M4)."""
    if platform.system() != "Darwin":
        return False

    # Query the CPU brand string; any failure (missing sysctl, timeout) is
    # treated as "not Apple Silicon".
    try:
        proc = subprocess.run(
            ["sysctl", "-n", "machdep.cpu.brand_string"],
            capture_output=True,
            text=True,
            timeout=1,
        )
        return "Apple" in proc.stdout.strip()
    except Exception:
        return False
49
+
50
+
51
def get_gpu_info() -> dict[str, Any]:
    """Get Apple GPU information using ioreg."""
    try:
        # First attempt: the I/O Registry lists an IOAccelerator entry when a
        # GPU accelerator is present.
        ioreg = subprocess.run(
            ["ioreg", "-r", "-d", "1", "-w", "0", "-c", "IOAccelerator"],
            capture_output=True,
            text=True,
            timeout=2,
        )

        if ioreg.returncode == 0 and ioreg.stdout:
            for line in ioreg.stdout.strip().split("\n"):
                if "IOAccelerator" in line and "class" in line:
                    return {"detected": True, "type": "Apple GPU"}
        else:
            # Surface diagnostics on stderr instead of raising.
            print("Error collecting Apple GPU info. ioreg stdout was:", file=sys.stderr)
            print(ioreg.stdout, file=sys.stderr)
            print("ioreg stderr was:", file=sys.stderr)
            print(ioreg.stderr, file=sys.stderr)

        # Fallback: system_profiler also reports the GPU chipset model.
        profiler = subprocess.run(
            ["system_profiler", "SPDisplaysDataType"],
            capture_output=True,
            text=True,
            timeout=3,
        )

        if profiler.returncode == 0 and "Apple" in profiler.stdout:
            for line in profiler.stdout.split("\n"):
                if "Chipset Model:" in line:
                    return {"detected": True, "type": line.split(":")[-1].strip()}

    except Exception:
        pass

    return {"detected": False}
89
+
90
+
91
def apple_gpu_available() -> bool:
    """
    Check if Apple GPU monitoring is available.

    Returns True if running on Apple Silicon (M-series chips) and psutil is installed.
    """
    # Any failure (psutil missing, detection error) means monitoring is unavailable.
    try:
        _ensure_psutil()
        return is_apple_silicon()
    except Exception:
        return False
104
+
105
+
106
def collect_apple_metrics() -> dict:
    """
    Collect system metrics for Apple Silicon.

    Returns:
        Dictionary of system metrics including CPU, memory, and GPU info.
        Empty if psutil is not installed.
    """
    if not PSUTIL_AVAILABLE:
        try:
            _ensure_psutil()
        except ImportError:
            # Without psutil there is nothing we can measure.
            return {}

    metrics = {}

    # Each probe is wrapped individually so a single failing psutil call does
    # not prevent the remaining metrics from being collected.
    try:
        cpu_percent = psutil.cpu_percent(interval=0.1, percpu=False)
        metrics["cpu/utilization"] = cpu_percent
    except Exception:
        pass

    try:
        cpu_percents = psutil.cpu_percent(interval=0.1, percpu=True)
        for i, percent in enumerate(cpu_percents):
            metrics[f"cpu/{i}/utilization"] = percent
    except Exception:
        pass

    try:
        cpu_freq = psutil.cpu_freq()
        if cpu_freq:
            metrics["cpu/frequency"] = cpu_freq.current
            if cpu_freq.max > 0:
                metrics["cpu/frequency_max"] = cpu_freq.max
    except Exception:
        pass

    # Byte counts are converted to GiB for readability in the dashboard.
    try:
        mem = psutil.virtual_memory()
        metrics["memory/used"] = mem.used / (1024**3)
        metrics["memory/total"] = mem.total / (1024**3)
        metrics["memory/available"] = mem.available / (1024**3)
        metrics["memory/percent"] = mem.percent
    except Exception:
        pass

    try:
        swap = psutil.swap_memory()
        metrics["swap/used"] = swap.used / (1024**3)
        metrics["swap/total"] = swap.total / (1024**3)
        metrics["swap/percent"] = swap.percent
    except Exception:
        pass

    try:
        sensors_temps = psutil.sensors_temperatures()
        if sensors_temps:
            for name, entries in sensors_temps.items():
                for i, entry in enumerate(entries):
                    label = entry.label or f"{name}_{i}"
                    metrics[f"temp/{label}"] = entry.current
    except Exception:
        pass

    # Only a detection flag is logged for the GPU. (A dead no-op branch that
    # inspected gpu_info["type"] and did nothing has been removed.)
    gpu_info = get_gpu_info()
    if gpu_info.get("detected"):
        metrics["gpu/detected"] = 1

    return metrics
177
+
178
+
179
class AppleGpuMonitor:
    """Background daemon thread that periodically logs Apple Silicon metrics."""

    def __init__(self, run: "Run", interval: float = 10.0):
        self._run = run
        self._interval = interval
        self._stop_flag = threading.Event()
        self._thread: "threading.Thread | None" = None

    def start(self):
        """Start the monitoring thread; warns and no-ops when unsupported."""
        if not is_apple_silicon():
            warnings.warn(
                "auto_log_gpu=True but not running on Apple Silicon. "
                "Apple GPU logging disabled."
            )
            return

        if not PSUTIL_AVAILABLE:
            try:
                _ensure_psutil()
            except ImportError:
                warnings.warn(
                    "auto_log_gpu=True but psutil not installed. "
                    "Install with: pip install psutil"
                )
                return

        worker = threading.Thread(target=self._monitor_loop, daemon=True)
        self._thread = worker
        worker.start()

    def stop(self):
        """Signal the loop to exit and wait briefly for the thread to finish."""
        self._stop_flag.set()
        if self._thread is not None:
            self._thread.join(timeout=2.0)

    def _monitor_loop(self):
        # Collect and log until stop() is called. Errors are swallowed so a
        # transient collection failure never kills the monitor thread.
        while not self._stop_flag.is_set():
            try:
                snapshot = collect_apple_metrics()
                if snapshot:
                    self._run.log_system(snapshot)
            except Exception:
                pass

            # Event.wait doubles as an interruptible sleep.
            self._stop_flag.wait(timeout=self._interval)
222
+
223
+
224
def log_apple_gpu(run: "Run | None" = None) -> dict:
    """
    Log Apple Silicon system metrics to the current or specified run.

    Args:
        run: Optional Run instance. If None, uses current run from context.

    Returns:
        dict: The system metrics that were logged.

    Example:
        ```python
        import trackio

        run = trackio.init(project="my-project")
        trackio.log({"loss": 0.5})
        trackio.log_apple_gpu()
        ```
    """
    # Imported here to avoid a circular import at module load time.
    from trackio import context_vars

    target = run if run is not None else context_vars.current_run.get()
    if target is None:
        raise RuntimeError("Call trackio.init() before trackio.log_apple_gpu().")

    metrics = collect_apple_metrics()
    if metrics:
        target.log_system(metrics)
    return metrics
trackio/assets/badge.png ADDED
trackio/assets/trackio_logo_dark.png ADDED
trackio/assets/trackio_logo_light.png ADDED
trackio/assets/trackio_logo_old.png ADDED

Git LFS Details

  • SHA256: 3922c4d1e465270ad4d8abb12023f3beed5d9f7f338528a4c0ac21dcf358a1c8
  • Pointer size: 131 Bytes
  • Size of remote file: 487 kB
trackio/assets/trackio_logo_type_dark.png ADDED
trackio/assets/trackio_logo_type_dark_transparent.png ADDED
trackio/assets/trackio_logo_type_light.png ADDED
trackio/assets/trackio_logo_type_light_transparent.png ADDED
trackio/cli.py ADDED
@@ -0,0 +1,1231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+
4
+ from trackio import show, sync
5
+ from trackio.cli_helpers import (
6
+ error_exit,
7
+ format_alerts,
8
+ format_json,
9
+ format_list,
10
+ format_metric_values,
11
+ format_project_summary,
12
+ format_run_summary,
13
+ format_snapshot,
14
+ format_system_metric_names,
15
+ format_system_metrics,
16
+ )
17
+ from trackio.markdown import Markdown
18
+ from trackio.server import get_project_summary, get_run_summary
19
+ from trackio.sqlite_storage import SQLiteStorage
20
+
21
+
22
+ def _get_space(args):
23
+ return getattr(args, "space", None)
24
+
25
+
26
def _get_remote(args):
    """Build a RemoteClient for the configured Space, or None when --space is unset/empty."""
    from trackio.remote_client import RemoteClient

    space = _get_space(args)
    if space:
        return RemoteClient(space, hf_token=getattr(args, "hf_token", None))
    return None
34
+
35
+
36
def _handle_status():
    """Print a sync-status summary of every local Trackio project."""
    print("Reading local Trackio projects...\n")
    projects = SQLiteStorage.get_projects()
    if not projects:
        print("No Trackio projects found.")
        return

    # Partition projects into: purely local, synced to a Space, or synced
    # but with pending (unsynced) changes.
    buckets = {"local": [], "synced": [], "unsynced": []}
    for project in projects:
        if SQLiteStorage.get_space_id(project) is None:
            buckets["local"].append(project)
        elif SQLiteStorage.has_pending_data(project):
            buckets["unsynced"].append(project)
        else:
            buckets["synced"].append(project)

    print("Finished reading Trackio projects")
    if buckets["local"]:
        print(f" * {len(buckets['local'])} local trackio project(s) [OK]")
    if buckets["synced"]:
        print(f" * {len(buckets['synced'])} trackio project(s) synced to Spaces [OK]")
    unsynced = buckets["unsynced"]
    if unsynced:
        print(
            f" * {len(unsynced)} trackio project(s) with unsynced changes [WARNING]:"
        )
        for name in unsynced:
            print(f" - {name}")
        print(
            f"\nRun `trackio sync --project {unsynced[0]}` to sync. "
            "Or run `trackio sync --all` to sync all unsynced changes."
        )
73
+
74
+
75
def _handle_sync(args):
    """Dispatch `trackio sync`: one named project, or all pending projects with --all."""
    from trackio.deploy import sync_incremental

    if args.sync_all and args.project:
        error_exit("Cannot use --all and --project together.")
    if not args.sync_all and not args.project:
        error_exit("Must provide either --project or --all.")

    if not args.sync_all:
        # Single-project sync: prefer an explicit --space-id, otherwise fall
        # back to the Space previously configured for this project.
        space_id = args.space_id
        if space_id is None:
            space_id = SQLiteStorage.get_space_id(args.project)
        sync(
            project=args.project,
            space_id=space_id,
            private=args.private,
            force=args.force,
        )
        return

    # --all: incrementally push every project that has a configured Space
    # and unsynced changes.
    synced_any = False
    for project in SQLiteStorage.get_projects():
        space_id = SQLiteStorage.get_space_id(project)
        if not (space_id and SQLiteStorage.has_pending_data(project)):
            continue
        sync_incremental(project, space_id, private=args.private, pending_only=True)
        synced_any = True
    if not synced_any:
        print("No projects with unsynced data found.")
105
+
106
+
107
def _extract_reports(
    run: str, logs: list[dict], report_name: str | None = None
) -> list[dict]:
    """Collect Markdown report entries from a run's raw log dicts.

    A report is any log value shaped like a serialized Markdown object
    (a dict whose "_type" equals Markdown.TYPE and whose "_value" is a
    string). When `report_name` is given, only that key is considered.
    """
    collected: list[dict] = []
    for log in logs:
        for key, value in log.items():
            if report_name is not None and key != report_name:
                continue
            if not (isinstance(value, dict) and value.get("_type") == Markdown.TYPE):
                continue
            content = value.get("_value")
            if not isinstance(content, str):
                continue
            collected.append(
                {
                    "run": run,
                    "report": key,
                    "step": log.get("step"),
                    "timestamp": log.get("timestamp"),
                    "content": content,
                }
            )
    return collected
130
+
131
+
132
def _require_project_db(project: str) -> None:
    """Exit with an error message if `project` has no local database."""
    db_path = SQLiteStorage.get_project_db_path(project)
    if not db_path.exists():
        error_exit(f"Project '{project}' not found.")


def _require_run(project: str, run: str) -> list[str]:
    """Exit with an error message if `run` does not exist in `project`.

    Returns the full list of run names so callers can reuse it.
    """
    runs = SQLiteStorage.get_runs(project)
    if run not in runs:
        error_exit(f"Run '{run}' not found in project '{project}'.")
    return runs


def _add_json_flag(subparser) -> None:
    """Attach the shared --json output flag to a subparser."""
    subparser.add_argument(
        "--json",
        action="store_true",
        help="Output in JSON format",
    )


def _add_alert_args(subparser) -> None:
    """Attach the argument set shared by `list alerts` and `get alerts`."""
    subparser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    subparser.add_argument(
        "--run",
        required=False,
        help="Run name (optional)",
    )
    subparser.add_argument(
        "--level",
        required=False,
        help="Filter by alert level (info, warn, error)",
    )
    _add_json_flag(subparser)
    subparser.add_argument(
        "--since",
        required=False,
        help="Only show alerts after this ISO 8601 timestamp",
    )


def _print_alerts(args, remote) -> None:
    """Fetch and print alerts; shared by `list alerts` and `get alerts`."""
    if remote:
        alerts = remote.predict(
            args.project,
            args.run,
            args.level,
            args.since,
            api_name="/get_alerts",
        )
    else:
        _require_project_db(args.project)
        alerts = SQLiteStorage.get_alerts(
            args.project,
            run_name=args.run,
            level=args.level,
            since=args.since,
        )
    if args.json:
        print(
            format_json(
                {
                    "project": args.project,
                    "run": args.run,
                    "level": args.level,
                    "since": args.since,
                    "alerts": alerts,
                }
            )
        )
    else:
        print(format_alerts(alerts))


def main():
    """Entry point for the `trackio` command-line interface.

    Builds the argument parser, resolves global flags that may trail the
    subcommand (--space, --hf-token), and dispatches to the requested
    command. Query commands (`list`, `get`) operate on local SQLite
    storage, or on a remote Hugging Face Space when --space is given.
    """
    parser = argparse.ArgumentParser(description="Trackio CLI")
    parser.add_argument(
        "--space",
        required=False,
        help="HF Space ID (e.g. 'user/space') or Space URL to query remotely.",
    )
    parser.add_argument(
        "--hf-token",
        required=False,
        help="HF token for accessing private Spaces.",
    )
    subparsers = parser.add_subparsers(dest="command")

    # --- `show`: launch the dashboard UI ---
    ui_parser = subparsers.add_parser(
        "show", help="Show the Trackio dashboard UI for a project"
    )
    ui_parser.add_argument(
        "--project", required=False, help="Project name to show in the dashboard"
    )
    ui_parser.add_argument(
        "--theme",
        required=False,
        default="default",
        help="A Gradio Theme to use for the dashboard instead of the default, can be a built-in theme (e.g. 'soft', 'citrus'), or a theme from the Hub (e.g. 'gstaff/xkcd').",
    )
    ui_parser.add_argument(
        "--mcp-server",
        action="store_true",
        help="Enable MCP server functionality. The Trackio dashboard will be set up as an MCP server and certain functions will be exposed as MCP tools.",
    )
    ui_parser.add_argument(
        "--footer",
        action="store_true",
        default=True,
        help="Show the Gradio footer. Use --no-footer to hide it.",
    )
    ui_parser.add_argument(
        "--no-footer",
        dest="footer",
        action="store_false",
        help="Hide the Gradio footer.",
    )
    ui_parser.add_argument(
        "--color-palette",
        required=False,
        help="Comma-separated list of hex color codes for plot lines (e.g. '#FF0000,#00FF00,#0000FF'). If not provided, the TRACKIO_COLOR_PALETTE environment variable will be used, or the default palette if not set.",
    )
    ui_parser.add_argument(
        "--host",
        required=False,
        help="Host to bind the server to (e.g. '0.0.0.0' for remote access). If not provided, defaults to '127.0.0.1' (localhost only).",
    )

    # --- `status`: local project sync overview ---
    subparsers.add_parser(
        "status",
        help="Show the status of all local Trackio projects, including sync status.",
    )

    # --- `sync`: push a local database to a Space ---
    sync_parser = subparsers.add_parser(
        "sync",
        help="Sync a local project's database to a Hugging Face Space. If the Space does not exist, it will be created.",
    )
    sync_parser.add_argument(
        "--project",
        required=False,
        help="The name of the local project.",
    )
    sync_parser.add_argument(
        "--space-id",
        required=False,
        help="The Hugging Face Space ID where the project will be synced (e.g. username/space_id). If not provided, uses the previously-configured Space.",
    )
    sync_parser.add_argument(
        "--all",
        action="store_true",
        dest="sync_all",
        help="Sync all projects that have unsynced data to their configured Spaces.",
    )
    sync_parser.add_argument(
        "--private",
        action="store_true",
        help="Make the Hugging Face Space private if creating a new Space. By default, the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.",
    )
    sync_parser.add_argument(
        "--force",
        action="store_true",
        help="Overwrite the existing database without prompting for confirmation.",
    )

    # --- `list`: enumerate projects / runs / metrics / alerts / reports ---
    list_parser = subparsers.add_parser(
        "list",
        help="List projects, runs, or metrics",
    )
    list_subparsers = list_parser.add_subparsers(dest="list_type", required=True)

    list_projects_parser = list_subparsers.add_parser(
        "projects",
        help="List all projects",
    )
    _add_json_flag(list_projects_parser)

    list_runs_parser = list_subparsers.add_parser(
        "runs",
        help="List runs for a project",
    )
    list_runs_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    _add_json_flag(list_runs_parser)

    list_metrics_parser = list_subparsers.add_parser(
        "metrics",
        help="List metrics for a run",
    )
    list_metrics_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    list_metrics_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    _add_json_flag(list_metrics_parser)

    list_system_metrics_parser = list_subparsers.add_parser(
        "system-metrics",
        help="List system metrics for a run",
    )
    list_system_metrics_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    list_system_metrics_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    _add_json_flag(list_system_metrics_parser)

    list_alerts_parser = list_subparsers.add_parser(
        "alerts",
        help="List alerts for a project or run",
    )
    _add_alert_args(list_alerts_parser)

    list_reports_parser = list_subparsers.add_parser(
        "reports",
        help="List markdown reports for a project or run",
    )
    list_reports_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    list_reports_parser.add_argument(
        "--run",
        required=False,
        help="Run name (optional)",
    )
    _add_json_flag(list_reports_parser)

    # --- `get`: fetch summaries, metric values, snapshots, alerts, reports ---
    get_parser = subparsers.add_parser(
        "get",
        help="Get project, run, or metric information",
    )
    get_subparsers = get_parser.add_subparsers(dest="get_type", required=True)

    get_project_parser = get_subparsers.add_parser(
        "project",
        help="Get project summary",
    )
    get_project_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    _add_json_flag(get_project_parser)

    get_run_parser = get_subparsers.add_parser(
        "run",
        help="Get run summary",
    )
    get_run_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    get_run_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    _add_json_flag(get_run_parser)

    get_metric_parser = get_subparsers.add_parser(
        "metric",
        help="Get metric values for a run",
    )
    get_metric_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    get_metric_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    get_metric_parser.add_argument(
        "--metric",
        required=True,
        help="Metric name",
    )
    get_metric_parser.add_argument(
        "--step",
        type=int,
        required=False,
        help="Get metric at exactly this step",
    )
    get_metric_parser.add_argument(
        "--around",
        type=int,
        required=False,
        help="Get metrics around this step (use with --window)",
    )
    get_metric_parser.add_argument(
        "--at-time",
        required=False,
        help="Get metrics around this ISO 8601 timestamp (use with --window)",
    )
    get_metric_parser.add_argument(
        "--window",
        type=int,
        required=False,
        default=10,
        help="Window size: ±steps for --around, ±seconds for --at-time (default: 10)",
    )
    _add_json_flag(get_metric_parser)

    get_snapshot_parser = get_subparsers.add_parser(
        "snapshot",
        help="Get all metrics at/around a step or timestamp",
    )
    get_snapshot_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    get_snapshot_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    get_snapshot_parser.add_argument(
        "--step",
        type=int,
        required=False,
        help="Get all metrics at exactly this step",
    )
    get_snapshot_parser.add_argument(
        "--around",
        type=int,
        required=False,
        help="Get all metrics around this step (use with --window)",
    )
    get_snapshot_parser.add_argument(
        "--at-time",
        required=False,
        help="Get all metrics around this ISO 8601 timestamp (use with --window)",
    )
    get_snapshot_parser.add_argument(
        "--window",
        type=int,
        required=False,
        default=10,
        help="Window size: ±steps for --around, ±seconds for --at-time (default: 10)",
    )
    _add_json_flag(get_snapshot_parser)

    get_system_metric_parser = get_subparsers.add_parser(
        "system-metric",
        help="Get system metric values for a run",
    )
    get_system_metric_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    get_system_metric_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    get_system_metric_parser.add_argument(
        "--metric",
        required=False,
        help="System metric name (optional, if not provided returns all system metrics)",
    )
    _add_json_flag(get_system_metric_parser)

    get_alerts_parser = get_subparsers.add_parser(
        "alerts",
        help="Get alerts for a project or run",
    )
    _add_alert_args(get_alerts_parser)

    get_report_parser = get_subparsers.add_parser(
        "report",
        help="Get markdown report entries for a run",
    )
    get_report_parser.add_argument(
        "--project",
        required=True,
        help="Project name",
    )
    get_report_parser.add_argument(
        "--run",
        required=True,
        help="Run name",
    )
    get_report_parser.add_argument(
        "--report",
        required=True,
        help="Report metric name",
    )
    _add_json_flag(get_report_parser)

    # --- `skills`: install the Trackio skill for AI assistants ---
    skills_parser = subparsers.add_parser(
        "skills",
        help="Manage Trackio skills for AI coding assistants",
    )
    skills_subparsers = skills_parser.add_subparsers(
        dest="skills_action", required=True
    )
    skills_add_parser = skills_subparsers.add_parser(
        "add",
        help="Download and install the Trackio skill for an AI assistant",
    )
    skills_add_parser.add_argument(
        "--cursor",
        action="store_true",
        help="Install for Cursor",
    )
    skills_add_parser.add_argument(
        "--claude",
        action="store_true",
        help="Install for Claude Code",
    )
    skills_add_parser.add_argument(
        "--codex",
        action="store_true",
        help="Install for Codex",
    )
    skills_add_parser.add_argument(
        "--opencode",
        action="store_true",
        help="Install for OpenCode",
    )
    skills_add_parser.add_argument(
        "--global",
        dest="global_",
        action="store_true",
        help="Install globally (user-level) instead of in the current project directory",
    )
    skills_add_parser.add_argument(
        "--dest",
        type=str,
        required=False,
        help="Install into a custom destination (path to skills directory)",
    )
    skills_add_parser.add_argument(
        "--force",
        action="store_true",
        help="Overwrite existing skill if it already exists",
    )

    args, unknown_args = parser.parse_known_args()
    if unknown_args:
        # Allow the global --space / --hf-token flags to appear *after* the
        # subcommand (argparse only binds them before it). Anything else left
        # over is a genuine usage error.
        trailing_global_parser = argparse.ArgumentParser(add_help=False)
        trailing_global_parser.add_argument("--space", required=False)
        trailing_global_parser.add_argument("--hf-token", required=False)
        trailing_globals, remaining_unknown = trailing_global_parser.parse_known_args(
            unknown_args
        )
        if remaining_unknown:
            parser.error(f"unrecognized arguments: {' '.join(remaining_unknown)}")
        if trailing_globals.space is not None:
            args.space = trailing_globals.space
        if trailing_globals.hf_token is not None:
            args.hf_token = trailing_globals.hf_token

    # Local-only commands reject remote mode explicitly.
    if args.command in ("show", "status", "sync", "skills") and _get_space(args):
        error_exit(
            f"The '{args.command}' command does not support --space (remote mode)."
        )

    if args.command == "show":
        color_palette = None
        if args.color_palette:
            color_palette = [color.strip() for color in args.color_palette.split(",")]
        show(
            project=args.project,
            theme=args.theme,
            mcp_server=args.mcp_server,
            footer=args.footer,
            color_palette=color_palette,
            host=args.host,
        )
    elif args.command == "status":
        _handle_status()
    elif args.command == "sync":
        _handle_sync(args)
    elif args.command == "list":
        remote = _get_remote(args)
        if args.list_type == "projects":
            if remote:
                projects = remote.predict(api_name="/get_all_projects")
            else:
                projects = SQLiteStorage.get_projects()
            if args.json:
                print(format_json({"projects": projects}))
            else:
                print(format_list(projects, "Projects"))
        elif args.list_type == "runs":
            if remote:
                runs = remote.predict(args.project, api_name="/get_runs_for_project")
            else:
                _require_project_db(args.project)
                runs = SQLiteStorage.get_runs(args.project)
            if args.json:
                print(format_json({"project": args.project, "runs": runs}))
            else:
                print(format_list(runs, f"Runs in '{args.project}'"))
        elif args.list_type == "metrics":
            if remote:
                metrics = remote.predict(
                    args.project, args.run, api_name="/get_metrics_for_run"
                )
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                metrics = SQLiteStorage.get_all_metrics_for_run(args.project, args.run)
            if args.json:
                print(
                    format_json(
                        {"project": args.project, "run": args.run, "metrics": metrics}
                    )
                )
            else:
                print(
                    format_list(
                        metrics, f"Metrics for '{args.run}' in '{args.project}'"
                    )
                )
        elif args.list_type == "system-metrics":
            if remote:
                system_metrics = remote.predict(
                    args.project, args.run, api_name="/get_system_metrics_for_run"
                )
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                system_metrics = SQLiteStorage.get_all_system_metrics_for_run(
                    args.project, args.run
                )
            if args.json:
                print(
                    format_json(
                        {
                            "project": args.project,
                            "run": args.run,
                            "system_metrics": system_metrics,
                        }
                    )
                )
            else:
                print(format_system_metric_names(system_metrics))
        elif args.list_type == "alerts":
            _print_alerts(args, remote)
        elif args.list_type == "reports":
            if remote:
                runs = remote.predict(args.project, api_name="/get_runs_for_project")
            else:
                _require_project_db(args.project)
                runs = SQLiteStorage.get_runs(args.project)
            if args.run and args.run not in runs:
                error_exit(f"Run '{args.run}' not found in project '{args.project}'.")

            # No --run means scan every run in the project for reports.
            target_runs = [args.run] if args.run else runs
            all_reports = []
            for run_name in target_runs:
                if remote:
                    logs = remote.predict(args.project, run_name, api_name="/get_logs")
                else:
                    logs = SQLiteStorage.get_logs(args.project, run_name)
                all_reports.extend(_extract_reports(run_name, logs))

            if args.json:
                print(
                    format_json(
                        {
                            "project": args.project,
                            "run": args.run,
                            "reports": all_reports,
                        }
                    )
                )
            else:
                report_lines = [
                    f"{entry['run']} | {entry['report']} | step={entry['step']} | {entry['timestamp']}"
                    for entry in all_reports
                ]
                if args.run:
                    print(
                        format_list(
                            report_lines,
                            f"Reports for '{args.run}' in '{args.project}'",
                        )
                    )
                else:
                    print(format_list(report_lines, f"Reports in '{args.project}'"))
    elif args.command == "get":
        remote = _get_remote(args)
        if args.get_type == "project":
            if remote:
                summary = remote.predict(args.project, api_name="/get_project_summary")
            else:
                _require_project_db(args.project)
                summary = get_project_summary(args.project)
            if args.json:
                print(format_json(summary))
            else:
                print(format_project_summary(summary))
        elif args.get_type == "run":
            if remote:
                summary = remote.predict(
                    args.project, args.run, api_name="/get_run_summary"
                )
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                summary = get_run_summary(args.project, args.run)
            if args.json:
                print(format_json(summary))
            else:
                print(format_run_summary(summary))
        elif args.get_type == "metric":
            at_time = getattr(args, "at_time", None)
            if remote:
                values = remote.predict(
                    args.project,
                    args.run,
                    args.metric,
                    args.step,
                    args.around,
                    at_time,
                    args.window,
                    api_name="/get_metric_values",
                )
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                metrics = SQLiteStorage.get_all_metrics_for_run(args.project, args.run)
                if args.metric not in metrics:
                    error_exit(
                        f"Metric '{args.metric}' not found in run '{args.run}' of project '{args.project}'."
                    )
                values = SQLiteStorage.get_metric_values(
                    args.project,
                    args.run,
                    args.metric,
                    step=args.step,
                    around_step=args.around,
                    at_time=at_time,
                    window=args.window,
                )
            if args.json:
                print(
                    format_json(
                        {
                            "project": args.project,
                            "run": args.run,
                            "metric": args.metric,
                            "values": values,
                        }
                    )
                )
            else:
                print(format_metric_values(values))
        elif args.get_type == "snapshot":
            at_time = getattr(args, "at_time", None)
            # Bug fix: compare against None rather than truthiness, so that
            # `--step 0` and `--around 0` (both falsy) are accepted as valid
            # selectors instead of being rejected as missing.
            if args.step is None and args.around is None and at_time is None:
                error_exit(
                    "Provide --step, --around (with --window), or --at-time (with --window)."
                )
            if remote:
                snapshot = remote.predict(
                    args.project,
                    args.run,
                    args.step,
                    args.around,
                    at_time,
                    args.window,
                    api_name="/get_snapshot",
                )
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                snapshot = SQLiteStorage.get_snapshot(
                    args.project,
                    args.run,
                    step=args.step,
                    around_step=args.around,
                    at_time=at_time,
                    window=args.window,
                )
            if args.json:
                result = {
                    "project": args.project,
                    "run": args.run,
                    "metrics": snapshot,
                }
                if args.step is not None:
                    result["step"] = args.step
                if args.around is not None:
                    result["around"] = args.around
                    result["window"] = args.window
                if at_time is not None:
                    result["at_time"] = at_time
                    result["window"] = args.window
                print(format_json(result))
            else:
                print(format_snapshot(snapshot))
        elif args.get_type == "system-metric":
            # Fetch the raw system logs, plus the known metric names when a
            # specific metric was requested (for validation).
            if remote:
                system_metrics = remote.predict(
                    args.project, args.run, api_name="/get_system_logs"
                )
                all_system_metric_names = (
                    remote.predict(
                        args.project,
                        args.run,
                        api_name="/get_system_metrics_for_run",
                    )
                    if args.metric
                    else None
                )
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                system_metrics = SQLiteStorage.get_system_logs(args.project, args.run)
                all_system_metric_names = (
                    SQLiteStorage.get_all_system_metrics_for_run(
                        args.project, args.run
                    )
                    if args.metric
                    else None
                )
            if args.metric:
                if args.metric not in all_system_metric_names:
                    error_exit(
                        f"System metric '{args.metric}' not found in run '{args.run}' of project '{args.project}'."
                    )
                # Keep only the timestamp and the requested metric column.
                filtered_metrics = [
                    {
                        k: v
                        for k, v in entry.items()
                        if k == "timestamp" or k == args.metric
                    }
                    for entry in system_metrics
                    if args.metric in entry
                ]
                if args.json:
                    print(
                        format_json(
                            {
                                "project": args.project,
                                "run": args.run,
                                "metric": args.metric,
                                "values": filtered_metrics,
                            }
                        )
                    )
                else:
                    print(format_system_metrics(filtered_metrics))
            else:
                if args.json:
                    print(
                        format_json(
                            {
                                "project": args.project,
                                "run": args.run,
                                "system_metrics": system_metrics,
                            }
                        )
                    )
                else:
                    print(format_system_metrics(system_metrics))
        elif args.get_type == "alerts":
            _print_alerts(args, remote)
        elif args.get_type == "report":
            if remote:
                logs = remote.predict(args.project, args.run, api_name="/get_logs")
            else:
                _require_project_db(args.project)
                _require_run(args.project, args.run)
                logs = SQLiteStorage.get_logs(args.project, args.run)

            reports = _extract_reports(args.run, logs, report_name=args.report)
            if not reports:
                error_exit(
                    f"Report '{args.report}' not found in run '{args.run}' of project '{args.project}'."
                )

            if args.json:
                print(
                    format_json(
                        {
                            "project": args.project,
                            "run": args.run,
                            "report": args.report,
                            "values": reports,
                        }
                    )
                )
            else:
                output = []
                for idx, entry in enumerate(reports, start=1):
                    output.append(
                        f"Entry {idx} | step={entry['step']} | timestamp={entry['timestamp']}"
                    )
                    output.append(entry["content"])
                    if idx < len(reports):
                        output.append("-" * 80)
                print("\n".join(output))
    elif args.command == "skills":
        if args.skills_action == "add":
            _handle_skills_add(args)
    else:
        parser.print_help()
1119
+
1120
+
1121
def _handle_skills_add(args):
    """Install the Trackio skill files for one or more AI coding assistants.

    Downloads the skill's markdown files from the Trackio GitHub repository
    into a central skills directory, then symlinks that directory into each
    selected assistant's skills folder. Exits via `error_exit` on any
    invalid flag combination, download failure, or pre-existing install
    without --force.
    """
    import shutil
    from pathlib import Path

    # The central/target path constants live in huggingface_hub; older
    # versions do not ship them, so fail with an actionable upgrade hint.
    try:
        from huggingface_hub.cli.skills import (
            CENTRAL_GLOBAL,
            CENTRAL_LOCAL,
            GLOBAL_TARGETS,
            LOCAL_TARGETS,
        )
    except (ImportError, ModuleNotFoundError):
        error_exit(
            "The 'trackio skills' command requires huggingface_hub >= 1.4.0.\n"
            "Please upgrade: pip install --upgrade huggingface_hub"
        )

    SKILL_ID = "trackio"
    GITHUB_RAW = "https://raw.githubusercontent.com/gradio-app/trackio/main"
    SKILL_PREFIX = ".agents/skills/trackio"
    # Files copied for the skill; SKILL.md is the entry point.
    SKILL_FILES = [
        "SKILL.md",
        "alerts.md",
        "logging_metrics.md",
        "retrieving_metrics.md",
    ]

    # At least one install destination must be chosen.
    if not (args.cursor or args.claude or args.codex or args.opencode or args.dest):
        error_exit(
            "Pick a destination via --cursor, --claude, --codex, --opencode, or --dest."
        )

    def download(url: str) -> str:
        """Fetch a skill file over HTTP; exit with guidance on failure."""
        from huggingface_hub.utils import get_session

        try:
            response = get_session().get(url)
            response.raise_for_status()
        except Exception as e:
            error_exit(
                f"Failed to download {url}\n{e}\n\n"
                "Make sure you have internet access. The skill files are fetched from "
                "the Trackio GitHub repository."
            )
        return response.text

    def remove_existing(path: Path, force: bool):
        """Delete a prior install at `path` (dir, file, or symlink), or exit
        if it exists and --force was not given."""
        # is_symlink() is checked separately because exists() follows links
        # and returns False for a dangling symlink.
        if not (path.exists() or path.is_symlink()):
            return
        if not force:
            error_exit(
                f"Skill already exists at {path}.\nRe-run with --force to overwrite."
            )
        if path.is_dir() and not path.is_symlink():
            shutil.rmtree(path)
        else:
            path.unlink()

    def install_to(skills_dir: Path, force: bool) -> Path:
        """Download all skill files into `skills_dir/trackio`; returns that path."""
        skills_dir = skills_dir.expanduser().resolve()
        skills_dir.mkdir(parents=True, exist_ok=True)
        dest = skills_dir / SKILL_ID
        remove_existing(dest, force)
        dest.mkdir()
        for fname in SKILL_FILES:
            content = download(f"{GITHUB_RAW}/{SKILL_PREFIX}/{fname}")
            (dest / fname).write_text(content, encoding="utf-8")
        return dest

    def create_symlink(
        agent_skills_dir: Path, central_skill_path: Path, force: bool
    ) -> Path:
        """Symlink the central install into an assistant's skills directory."""
        agent_skills_dir = agent_skills_dir.expanduser().resolve()
        agent_skills_dir.mkdir(parents=True, exist_ok=True)
        link_path = agent_skills_dir / SKILL_ID
        remove_existing(link_path, force)
        # A relative link keeps the install relocatable with its parent tree.
        link_path.symlink_to(os.path.relpath(central_skill_path, agent_skills_dir))
        return link_path

    # Cursor is not covered by huggingface_hub's target maps, so it is added
    # here. NOTE(review): assumes GLOBAL_TARGETS/LOCAL_TARGETS are dicts of
    # agent-name -> skills Path — confirm against huggingface_hub.cli.skills.
    global_targets = {**GLOBAL_TARGETS, "cursor": Path("~/.cursor/skills")}
    local_targets = {**LOCAL_TARGETS, "cursor": Path(".cursor/skills")}
    targets_dict = global_targets if args.global_ else local_targets

    # --dest installs directly into the given directory, with no symlinks.
    if args.dest:
        if args.cursor or args.claude or args.codex or args.opencode or args.global_:
            error_exit("--dest cannot be combined with agent flags or --global.")
        skill_dest = install_to(Path(args.dest), args.force)
        print(f"Installed '{SKILL_ID}' to {skill_dest}")
        return

    agent_targets = []
    if args.cursor:
        agent_targets.append(targets_dict["cursor"])
    if args.claude:
        agent_targets.append(targets_dict["claude"])
    if args.codex:
        agent_targets.append(targets_dict["codex"])
    if args.opencode:
        agent_targets.append(targets_dict["opencode"])

    # Install once centrally, then link each selected assistant to it.
    central_path = CENTRAL_GLOBAL if args.global_ else CENTRAL_LOCAL
    central_skill_path = install_to(central_path, args.force)
    print(f"Installed '{SKILL_ID}' to central location: {central_skill_path}")

    for agent_target in agent_targets:
        link_path = create_symlink(agent_target, central_skill_path, args.force)
        print(f"Created symlink: {link_path}")
1228
+
1229
+
1230
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
trackio/cli_helpers.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import sys
3
+ from typing import Any
4
+
5
+
6
def format_json(data: Any) -> str:
    """Serialize *data* as pretty-printed (2-space-indented) JSON."""
    text = json.dumps(data, indent=2)
    return text
9
+
10
+
11
+ def format_list(items: list[str], title: str | None = None) -> str:
12
+ """Format a list of items in human-readable format."""
13
+ if not items:
14
+ return f"No {title.lower() if title else 'items'} found."
15
+
16
+ output = []
17
+ if title:
18
+ output.append(f"{title}:")
19
+
20
+ for item in items:
21
+ output.append(f" - {item}")
22
+
23
+ return "\n".join(output)
24
+
25
+
26
def format_project_summary(summary: dict) -> str:
    """Render a project summary (name, run count, runs, last activity) as text."""
    lines = [
        f"Project: {summary['project']}",
        f"Number of runs: {summary['num_runs']}",
    ]

    runs = summary["runs"]
    if runs:
        lines.append("\nRuns:")
        lines.extend(f" - {run}" for run in runs)
    else:
        lines.append("\nNo runs found.")

    # Only shown when present and truthy.
    if summary.get("last_activity"):
        lines.append(f"\nLast activity (max step): {summary['last_activity']}")

    return "\n".join(lines)
42
+
43
+
44
def format_run_summary(summary: dict) -> str:
    """Render a run summary (log count, last step, metrics, config) as text."""
    lines = [
        f"Project: {summary['project']}",
        f"Run: {summary['run']}",
        f"Number of logs: {summary['num_logs']}",
    ]

    last_step = summary.get("last_step")
    if last_step is not None:
        lines.append(f"Last step: {last_step}")

    metrics = summary.get("metrics")
    if metrics:
        lines.append("\nMetrics:")
        lines.extend(f" - {metric}" for metric in metrics)
    else:
        lines.append("\nNo metrics found.")

    config = summary.get("config")
    if config:
        lines.append("\nConfig:")
        # Keys starting with "_" are internal and hidden from display.
        visible = {k: v for k, v in config.items() if not k.startswith("_")}
        if visible:
            lines.extend(f" {key}: {value}" for key, value in visible.items())
        else:
            lines.append(" (no config)")
    else:
        lines.append("\nConfig: (no config)")

    return "\n".join(lines)
73
+
74
+
75
def format_metric_values(values: list[dict]) -> str:
    """Render metric values as a small step/timestamp/value table."""
    if not values:
        return "No metric values found."

    rows = [
        f"Found {len(values)} value(s):\n",
        "Step | Timestamp | Value",
        "-" * 50,
    ]
    for entry in values:
        step = entry.get("step", "N/A")
        timestamp = entry.get("timestamp", "N/A")
        val = entry.get("value", "N/A")
        rows.append(f"{step} | {timestamp} | {val}")

    return "\n".join(rows)
91
+
92
+
93
def format_system_metrics(metrics: list[dict]) -> str:
    """Render system-metric entries, one block per timestamped entry."""
    if not metrics:
        return "No system metrics found."

    lines = [f"Found {len(metrics)} system metric entry/entries:\n"]
    for index, entry in enumerate(metrics, start=1):
        lines.append(f"\nEntry {index} (Timestamp: {entry.get('timestamp', 'N/A')}):")
        # The timestamp is shown in the header, so skip it in the body.
        lines.extend(
            f" {key}: {value}" for key, value in entry.items() if key != "timestamp"
        )

    return "\n".join(lines)
108
+
109
+
110
def format_system_metric_names(names: list[str]) -> str:
    """Render the list of system metric names under a standard heading."""
    return format_list(names, title="System Metrics")
113
+
114
+
115
def format_snapshot(snapshot: dict[str, list[dict]]) -> str:
    """Render a metrics snapshot grouped by metric name (alphabetical order)."""
    if not snapshot:
        return "No metrics found in the specified range."

    lines: list[str] = []
    for metric_name in sorted(snapshot):
        lines.append(f"\n{metric_name}:")
        lines.append(" Step | Timestamp | Value")
        lines.append(" " + "-" * 48)
        for point in snapshot[metric_name]:
            step = point.get("step", "N/A")
            ts = point.get("timestamp", "N/A")
            val = point.get("value", "N/A")
            lines.append(f" {step} | {ts} | {val}")

    return "\n".join(lines)
132
+
133
+
134
def format_alerts(alerts: list[dict]) -> str:
    """Format alerts in human-readable format.

    Renders one pipe-separated row per alert. Missing fields fall back to
    "N/A" (or "" for title/text). A stored-but-`None` level is also treated
    as "N/A" — previously `a.get("level", "N/A").upper()` raised
    AttributeError when the key existed with a `None` value.
    """
    if not alerts:
        return "No alerts found."

    output = [f"Found {len(alerts)} alert(s):\n"]
    output.append("Timestamp | Run | Level | Title | Text | Step")
    output.append("-" * 80)

    for a in alerts:
        ts = a.get("timestamp", "N/A")
        run = a.get("run", "N/A")
        # Guard falsy/None levels, mirroring the `or ""` treatment of `text`.
        level = (a.get("level") or "N/A").upper()
        title = a.get("title", "")
        text = a.get("text", "") or ""
        step = a.get("step", "N/A")
        output.append(f"{ts} | {run} | {level} | {title} | {text} | {step}")

    return "\n".join(output)
153
+
154
+
155
def error_exit(message: str, code: int = 1) -> None:
    """Print an error message to stderr and terminate with exit status *code*."""
    sys.stderr.write(f"Error: {message}\n")
    sys.exit(code)
trackio/commit_scheduler.py ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Originally copied from https://github.com/huggingface/huggingface_hub/blob/d0a948fc2a32ed6e557042a95ef3e4af97ec4a7c/src/huggingface_hub/_commit_scheduler.py
2
+
3
+ import atexit
4
+ import logging
5
+ import time
6
+ from concurrent.futures import Future
7
+ from dataclasses import dataclass
8
+ from pathlib import Path
9
+ from threading import Lock, Thread
10
+ from typing import Callable, Dict, List, Union
11
+
12
+ from huggingface_hub.hf_api import (
13
+ DEFAULT_IGNORE_PATTERNS,
14
+ CommitInfo,
15
+ CommitOperationAdd,
16
+ HfApi,
17
+ )
18
+ from huggingface_hub.utils import filter_repo_objects
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
@dataclass(frozen=True)
class _FileToUpload:
    """Temporary dataclass to store info about files to upload. Not meant to be used directly."""

    local_path: Path  # absolute path of the file on disk
    path_in_repo: str  # destination path inside the Hub repo
    size_limit: int  # file size in bytes (st_size) captured when the file was listed
    last_modified: float  # st_mtime captured when the file was listed
+
32
+
33
class CommitScheduler:
    """
    Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).

    The recommended way to use the scheduler is to use it as a context manager. This ensures that the scheduler is
    properly stopped and the last commit is triggered when the script ends. The scheduler can also be stopped manually
    with the `stop` method. Checkout the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
    to learn more about how to use it.

    Args:
        repo_id (`str`):
            The id of the repo to commit to.
        folder_path (`str` or `Path`):
            Path to the local folder to upload regularly.
        every (`int` or `float`, *optional*):
            The number of minutes between each commit. Defaults to 5 minutes.
        path_in_repo (`str`, *optional*):
            Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
            of the repository.
        repo_type (`str`, *optional*):
            The type of the repo to commit to. Defaults to `model`.
        revision (`str`, *optional*):
            The revision of the repo to commit to. Defaults to `main`.
        private (`bool`, *optional*):
            Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
        token (`str`, *optional*):
            The token to use to commit to the repo. Defaults to the token saved on the machine.
        allow_patterns (`List[str]` or `str`, *optional*):
            If provided, only files matching at least one pattern are uploaded.
        ignore_patterns (`List[str]` or `str`, *optional*):
            If provided, files matching any of the patterns are not uploaded.
        squash_history (`bool`, *optional*):
            Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
            useful to avoid degraded performances on the repo when it grows too large.
        hf_api (`HfApi`, *optional*):
            The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token,...).
        on_before_commit (`Callable[[], None]`, *optional*):
            If specified, a function that will be called before the CommitScheduler lists files to create a commit.

    Example:
    ```py
    >>> from pathlib import Path
    >>> from huggingface_hub import CommitScheduler

    # Scheduler uploads every 10 minutes
    >>> csv_path = Path("watched_folder/data.csv")
    >>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)

    >>> with csv_path.open("a") as f:
    ...     f.write("first line")

    # Some time later (...)
    >>> with csv_path.open("a") as f:
    ...     f.write("second line")
    ```

    Example using a context manager:
    ```py
    >>> from pathlib import Path
    >>> from huggingface_hub import CommitScheduler

    >>> with CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path="watched_folder", every=10) as scheduler:
    ...     csv_path = Path("watched_folder/data.csv")
    ...     with csv_path.open("a") as f:
    ...         f.write("first line")
    ...     (...)
    ...     with csv_path.open("a") as f:
    ...         f.write("second line")

    # Scheduler is now stopped and last commit have been triggered
    ```
    """

    def __init__(
        self,
        *,
        repo_id: str,
        folder_path: Union[str, Path],
        every: Union[int, float] = 5,
        path_in_repo: str | None = None,
        repo_type: str | None = None,
        revision: str | None = None,
        private: bool | None = None,
        token: str | None = None,
        allow_patterns: list[str] | str | None = None,
        ignore_patterns: list[str] | str | None = None,
        squash_history: bool = False,
        hf_api: HfApi | None = None,
        on_before_commit: Callable[[], None] | None = None,
    ) -> None:
        self.api = hf_api or HfApi(token=token)
        self.on_before_commit = on_before_commit

        # Folder
        self.folder_path = Path(folder_path).expanduser().resolve()
        self.path_in_repo = path_in_repo or ""
        self.allow_patterns = allow_patterns

        if ignore_patterns is None:
            ignore_patterns = []
        elif isinstance(ignore_patterns, str):
            ignore_patterns = [ignore_patterns]
        self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS

        if self.folder_path.is_file():
            raise ValueError(
                f"'folder_path' must be a directory, not a file: '{self.folder_path}'."
            )
        self.folder_path.mkdir(parents=True, exist_ok=True)

        # Repository
        repo_url = self.api.create_repo(
            repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True
        )
        self.repo_id = repo_url.repo_id
        self.repo_type = repo_type
        self.revision = revision
        self.token = token

        self.last_uploaded: Dict[Path, float] = {}  # key is local path, value is timestamp
        self.last_push_time: float | None = None

        if not every > 0:
            raise ValueError(f"'every' must be a positive integer, not '{every}'.")
        self.lock = Lock()
        self.every = every
        self.squash_history = squash_history

        # FIX: initialize the stop flag *before* starting the background thread
        # and registering the atexit hook. Both paths can invoke
        # `_push_to_hub`, which reads `self.__stopped`; assigning the flag
        # last (as the original did) left a window in which that read raised
        # AttributeError.
        self.__stopped = False

        logger.info(
            f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes."
        )
        self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
        self._scheduler_thread.start()
        atexit.register(self._push_to_hub)

    def stop(self) -> None:
        """Stop the scheduler.

        A stopped scheduler cannot be restarted. Mostly for tests purposes.
        """
        self.__stopped = True

    def __enter__(self) -> "CommitScheduler":
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Upload last changes before exiting
        self.trigger().result()
        self.stop()
        return

    def _run_scheduler(self) -> None:
        """Dumb thread waiting between each scheduled push to Hub."""
        while True:
            self.last_future = self.trigger()
            time.sleep(self.every * 60)
            if self.__stopped:
                break

    def trigger(self) -> Future:
        """Trigger a `push_to_hub` and return a future.

        This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
        immediately, without waiting for the next scheduled commit.
        """
        return self.api.run_as_future(self._push_to_hub)

    def _push_to_hub(self) -> CommitInfo | None:
        if self.__stopped:  # If stopped, already scheduled commits are ignored
            return None

        logger.info("(Background) scheduled commit triggered.")
        try:
            value = self.push_to_hub()
            if self.squash_history:
                logger.info("(Background) squashing repo history.")
                self.api.super_squash_history(
                    repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision
                )
            return value
        except Exception as e:
            logger.error(
                f"Error while pushing to Hub: {e}"
            )  # Depending on the setup, error might be silenced
            raise

    def push_to_hub(self) -> CommitInfo | None:
        """
        Push folder to the Hub and return the commit info.

        <Tip warning={true}>

        This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
        queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
        issues.

        </Tip>

        The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
        uploads only changed files. If no changes are found, the method returns without committing anything. If you want
        to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
        for example to compress data together in a single file before committing. For more details and examples, check
        out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
        """
        # Check files to upload (with lock)
        with self.lock:
            if self.on_before_commit is not None:
                self.on_before_commit()

            logger.debug("Listing files to upload for scheduled commit.")

            # List files from folder (taken from `_prepare_upload_folder_additions`)
            relpath_to_abspath = {
                path.relative_to(self.folder_path).as_posix(): path
                for path in sorted(
                    self.folder_path.glob("**/*")
                )  # sorted to be deterministic
                if path.is_file()
            }
            prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""

            # Filter with pattern + filter out unchanged files + retrieve current file size
            files_to_upload: List[_FileToUpload] = []
            for relpath in filter_repo_objects(
                relpath_to_abspath.keys(),
                allow_patterns=self.allow_patterns,
                ignore_patterns=self.ignore_patterns,
            ):
                local_path = relpath_to_abspath[relpath]
                stat = local_path.stat()
                if (
                    self.last_uploaded.get(local_path) is None
                    or self.last_uploaded[local_path] != stat.st_mtime
                ):
                    files_to_upload.append(
                        _FileToUpload(
                            local_path=local_path,
                            path_in_repo=prefix + relpath,
                            size_limit=stat.st_size,
                            last_modified=stat.st_mtime,
                        )
                    )

        # Return if nothing to upload
        if len(files_to_upload) == 0:
            logger.debug("Dropping schedule commit: no changed file to upload.")
            return None

        # Convert `_FileToUpload` as `CommitOperationAdd` (=> compute file shas + limit to file size)
        logger.debug("Removing unchanged files since previous scheduled commit.")
        add_operations = [
            CommitOperationAdd(
                # TODO: Cap the file to its current size, even if the user append data to it while a scheduled commit is happening
                # (requires an upstream fix for XET-535: `hf_xet` should support `BinaryIO` for upload)
                path_or_fileobj=file_to_upload.local_path,
                path_in_repo=file_to_upload.path_in_repo,
            )
            for file_to_upload in files_to_upload
        ]

        # Upload files (append mode expected - no need for lock)
        logger.debug("Uploading files for scheduled commit.")
        commit_info = self.api.create_commit(
            repo_id=self.repo_id,
            repo_type=self.repo_type,
            operations=add_operations,
            commit_message="Scheduled Commit",
            revision=self.revision,
        )

        # Remember mtimes only after a successful commit so failed uploads retry.
        for file in files_to_upload:
            self.last_uploaded[file.local_path] = file.last_modified

        self.last_push_time = time.time()

        return commit_info
trackio/context_vars.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import contextvars
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only for type checking to avoid importing trackio.run at runtime.
    from trackio.run import Run

# Context-local handle to the active Run (None when no run is in progress).
current_run: contextvars.ContextVar["Run | None"] = contextvars.ContextVar(
    "current_run", default=None
)
# Name of the project currently being logged to, if any.
current_project: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_project", default=None
)
# Identifier of the Trackio server in use, if any (NOTE: inferred from the
# variable name — confirm against the writers of this var).
current_server: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_server", default=None
)
# Hugging Face Space ID when logging to a deployed Space, if any.
current_space_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_space_id", default=None
)
trackio/deploy.py ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib.metadata
2
+ import io
3
+ import os
4
+ import sys
5
+ import threading
6
+ import time
7
+ from importlib.resources import files
8
+ from pathlib import Path
9
+
10
+ if sys.version_info >= (3, 11):
11
+ import tomllib
12
+ else:
13
+ import tomli as tomllib
14
+
15
+ import gradio
16
+ import huggingface_hub
17
+ from gradio_client import Client, handle_file
18
+ from httpx import ReadTimeout
19
+ from huggingface_hub.errors import HfHubHTTPError, RepositoryNotFoundError
20
+
21
+ import trackio
22
+ from trackio.sqlite_storage import SQLiteStorage
23
+ from trackio.utils import get_or_create_project_hash, preprocess_space_and_dataset_ids
24
+
25
# Direct-access URL of a running Space ({user}-{space}.hf.space).
SPACE_HOST_URL = "https://{user_name}-{space_name}.hf.space/"
# Canonical Hub page URL for a Space.
SPACE_URL = "https://huggingface.co/spaces/{space_id}"
27
+
28
+
29
def _get_source_install_dependencies() -> str:
    """Get trackio dependencies from pyproject.toml for source installs.

    Returns the `[project].dependencies` list plus the `spaces` optional
    extras, newline-joined, suitable for writing to a requirements.txt.
    """
    package_dir = files("trackio")
    pyproject_file = Path(package_dir).parent / "pyproject.toml"
    with open(pyproject_file, "rb") as fh:
        project_meta = tomllib.load(fh)["project"]
    base_deps = project_meta["dependencies"]
    extra_deps = project_meta.get("optional-dependencies", {}).get("spaces", [])
    return "\n".join(base_deps + extra_deps)
40
+
41
+
42
def _is_trackio_installed_from_source() -> bool:
    """Check if trackio is installed from source/editable install vs PyPI.

    Returns True for a module located outside site-packages/dist-packages or
    a distribution registering a `.pth` hook (editable installs), and also
    when metadata is unavailable — in that case callers conservatively treat
    the install as source and bundle the package files.
    """
    try:
        trackio_file = trackio.__file__
        # Regular pip installs live under site-packages/dist-packages.
        if "site-packages" not in trackio_file and "dist-packages" not in trackio_file:
            return True

        dist = importlib.metadata.distribution("trackio")
        if dist.files:
            # Renamed from `files` to avoid shadowing the module-level
            # `importlib.resources.files` import used elsewhere in this module.
            dist_files = list(dist.files)
            has_pth = any(".pth" in str(f) for f in dist_files)
            if has_pth:
                return True

        return False
    except (
        AttributeError,
        importlib.metadata.PackageNotFoundError,
        importlib.metadata.MetadataError,
        ValueError,
        TypeError,
    ):
        return True
65
+
66
+
67
def deploy_as_space(
    space_id: str,
    space_storage: huggingface_hub.SpaceStorage | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
):
    """
    Create (or update) a Gradio Space that runs the Trackio dashboard.

    Uploads a README, a requirements.txt (a pinned PyPI release for normal
    installs, or the source dependency list plus the package tree for
    source/editable installs), an `app.py` entry point, and configures Space
    secrets/variables from the local environment.

    Args:
        space_id (`str`):
            The ID of the Space, in the format username/reponame.
        space_storage ([`~huggingface_hub.SpaceStorage`], *optional*):
            Choice of persistent storage tier for the Space.
        dataset_id (`str`, *optional*):
            Dataset ID set as the `TRACKIO_DATASET_ID` Space variable.
        private (`bool`, *optional*):
            Whether to make the Space private. Ignored if the repo exists.
    """
    if (
        os.getenv("SYSTEM") == "spaces"
    ):  # in case a repo with this function is uploaded to spaces
        return

    trackio_path = files("trackio")

    hf_api = huggingface_hub.HfApi()

    try:
        huggingface_hub.create_repo(
            space_id,
            private=private,
            space_sdk="gradio",
            space_storage=space_storage,
            repo_type="space",
            exist_ok=True,
        )
    except HfHubHTTPError as e:
        if e.response.status_code in [401, 403]:  # unauthorized or forbidden
            # Re-prompt for a write token, then retry the creation once.
            print("Need 'write' access token to create a Spaces repo.")
            huggingface_hub.login(add_to_git_credential=False)
            huggingface_hub.create_repo(
                space_id,
                private=private,
                space_sdk="gradio",
                space_storage=space_storage,
                repo_type="space",
                exist_ok=True,
            )
        else:
            raise ValueError(f"Failed to create Space: {e}")

    # We can assume pandas, gradio, and huggingface-hub are already installed in a Gradio Space.
    # Make sure necessary dependencies are installed by creating a requirements.txt.
    is_source_install = _is_trackio_installed_from_source()

    # The packaged README doubles as the Space config: fill in its template
    # placeholders before uploading.
    with open(Path(trackio_path, "README.md"), "r") as f:
        readme_content = f.read()
    readme_content = readme_content.replace("{GRADIO_VERSION}", gradio.__version__)
    readme_content = readme_content.replace("{APP_FILE}", "app.py")
    readme_buffer = io.BytesIO(readme_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=readme_buffer,
        path_in_repo="README.md",
        repo_id=space_id,
        repo_type="space",
    )

    if is_source_install:
        requirements_content = _get_source_install_dependencies()
    else:
        # Pin to the locally installed trackio release.
        requirements_content = f"trackio[spaces]=={trackio.__version__}"

    requirements_buffer = io.BytesIO(requirements_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=requirements_buffer,
        path_in_repo="requirements.txt",
        repo_id=space_id,
        repo_type="space",
    )

    huggingface_hub.utils.disable_progress_bars()

    if is_source_install:
        # Source installs ship the package tree itself; it must contain a
        # built frontend since the Space cannot build it.
        dist_index = (
            Path(trackio.__file__).resolve().parent / "frontend" / "dist" / "index.html"
        )
        if not dist_index.is_file():
            raise ValueError(
                "The Trackio frontend build is missing. From the repository root run "
                "`cd trackio/frontend && npm ci && npm run build`, then deploy again."
            )
        hf_api.upload_folder(
            repo_id=space_id,
            repo_type="space",
            folder_path=trackio_path,
            path_in_repo="trackio",
            ignore_patterns=[
                "README.md",
                "frontend/node_modules/**",
                "frontend/src/**",
                "frontend/.gitignore",
                "frontend/package.json",
                "frontend/package-lock.json",
                "frontend/vite.config.js",
                "frontend/svelte.config.js",
                "**/__pycache__/**",
                "*.pyc",
            ],
        )

    app_file_content = """import trackio
trackio.show()"""
    app_file_buffer = io.BytesIO(app_file_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=app_file_buffer,
        path_in_repo="app.py",
        repo_id=space_id,
        repo_type="space",
    )

    # Forward local credentials and TRACKIO_* configuration to the Space.
    if hf_token := huggingface_hub.utils.get_token():
        huggingface_hub.add_space_secret(space_id, "HF_TOKEN", hf_token)
    if dataset_id is not None:
        huggingface_hub.add_space_variable(space_id, "TRACKIO_DATASET_ID", dataset_id)
    if logo_light_url := os.environ.get("TRACKIO_LOGO_LIGHT_URL"):
        huggingface_hub.add_space_variable(
            space_id, "TRACKIO_LOGO_LIGHT_URL", logo_light_url
        )
    if logo_dark_url := os.environ.get("TRACKIO_LOGO_DARK_URL"):
        huggingface_hub.add_space_variable(
            space_id, "TRACKIO_LOGO_DARK_URL", logo_dark_url
        )
    if plot_order := os.environ.get("TRACKIO_PLOT_ORDER"):
        huggingface_hub.add_space_variable(space_id, "TRACKIO_PLOT_ORDER", plot_order)
    if theme := os.environ.get("TRACKIO_THEME"):
        huggingface_hub.add_space_variable(space_id, "TRACKIO_THEME", theme)
    huggingface_hub.add_space_variable(space_id, "GRADIO_MCP_SERVER", "True")
192
+
193
+
194
def create_space_if_not_exists(
    space_id: str,
    space_storage: huggingface_hub.SpaceStorage | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
) -> None:
    """
    Creates a new Hugging Face Space if it does not exist.

    Args:
        space_id (`str`):
            The ID of the Space to create.
        space_storage ([`~huggingface_hub.SpaceStorage`], *optional*):
            Choice of persistent storage tier for the Space.
        dataset_id (`str`, *optional*):
            The ID of the Dataset to add to the Space as a space variable.
        private (`bool`, *optional*):
            Whether to make the Space private. If `None` (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.

    Raises:
        ValueError: If `space_id`/`dataset_id` are malformed, or the existence
            check fails with a non-auth HTTP error.
    """
    if "/" not in space_id:
        raise ValueError(
            f"Invalid space ID: {space_id}. Must be in the format: username/reponame or orgname/reponame."
        )
    if dataset_id is not None and "/" not in dataset_id:
        raise ValueError(
            f"Invalid dataset ID: {dataset_id}. Must be in the format: username/datasetname or orgname/datasetname."
        )
    try:
        huggingface_hub.repo_info(space_id, repo_type="space")
        print(f"* Found existing space: {SPACE_URL.format(space_id=space_id)}")
        return
    except RepositoryNotFoundError:
        # Not found: fall through to creation below.
        pass
    except HfHubHTTPError as e:
        if e.response.status_code in [401, 403]:  # unauthorized or forbidden
            # Prompt for a write token, then fall through and attempt creation.
            print("Need 'write' access token to create a Spaces repo.")
            huggingface_hub.login(add_to_git_credential=False)
        else:
            raise ValueError(f"Failed to create Space: {e}")

    print(f"* Creating new space: {SPACE_URL.format(space_id=space_id)}")
    deploy_as_space(space_id, space_storage, dataset_id, private)
    print("* Waiting for Space to be ready...")
    _wait_until_space_running(space_id)
240
+
241
+
242
def _wait_until_space_running(space_id: str, timeout: int = 300) -> None:
    """Poll the Space until its runtime stage is RUNNING or `timeout` seconds pass.

    Best-effort: returns silently on timeout instead of raising (unlike
    `wait_until_space_exists`) — presumably intentional so deployment
    continues even when the Space build is slow; confirm before relying on it.
    """
    hf_api = huggingface_hub.HfApi()
    start = time.time()
    delay = 2
    while time.time() - start < timeout:
        try:
            info = hf_api.space_info(space_id)
            if info.runtime and info.runtime.stage == "RUNNING":
                return
        except (huggingface_hub.utils.HfHubHTTPError, ReadTimeout):
            # Transient API errors: keep polling until the deadline.
            pass
        time.sleep(delay)
        delay = min(delay * 1.5, 15)  # exponential backoff capped at 15s
255
+
256
+
257
def wait_until_space_exists(
    space_id: str,
) -> None:
    """
    Blocks the current thread until the Space exists.

    Args:
        space_id (`str`):
            The ID of the Space to wait for.

    Raises:
        `TimeoutError`: If waiting for the Space takes longer than expected.
    """
    api = huggingface_hub.HfApi()
    wait_seconds = 1
    attempts_left = 30
    while attempts_left > 0:
        attempts_left -= 1
        try:
            api.space_info(space_id)
        except (huggingface_hub.utils.HfHubHTTPError, ReadTimeout):
            # Not visible yet (or transient error): back off and retry.
            time.sleep(wait_seconds)
            wait_seconds = min(wait_seconds * 2, 60)
        else:
            return
    raise TimeoutError("Waiting for space to exist took longer than expected")
280
+
281
+
282
def upload_db_to_space(project: str, space_id: str, force: bool = False) -> None:
    """
    Uploads the database of a local Trackio project to a Hugging Face Space.

    This uses the Gradio Client to upload since we do not want to trigger a new build of
    the Space, which would happen if we used `huggingface_hub.upload_file`.

    Args:
        project (`str`):
            The name of the project to upload.
        space_id (`str`):
            The ID of the Space to upload to.
        force (`bool`, *optional*, defaults to `False`):
            If `True`, overwrites the existing database without prompting. If `False`,
            prompts for confirmation.
    """
    db_path = SQLiteStorage.get_project_db_path(project)
    client = Client(space_id, verbose=False, httpx_kwargs={"timeout": 90})

    if not force:
        try:
            # Ask before clobbering a project DB that already exists remotely.
            existing_projects = client.predict(api_name="/get_all_projects")
            if project in existing_projects:
                response = input(
                    f"Database for project '{project}' already exists on Space '{space_id}'. "
                    f"Overwrite it? (y/N): "
                )
                if response.lower() not in ["y", "yes"]:
                    print("* Upload cancelled.")
                    return
        except Exception as e:
            # Best-effort check: if the Space can't be queried, proceed anyway.
            print(f"* Warning: Could not check if project exists on Space: {e}")
            print("* Proceeding with upload...")

    # The local token authorizes the Space-side endpoint to accept the DB.
    client.predict(
        api_name="/upload_db_to_space",
        project=project,
        uploaded_db=handle_file(db_path),
        hf_token=huggingface_hub.utils.get_token(),
    )
322
+
323
+
324
# Number of log rows sent per bulk API call when syncing a project to a Space.
SYNC_BATCH_SIZE = 500
325
+
326
+
327
def _push_log_batches(client, api_name: str, logs: list, label: str, hf_token) -> None:
    """Send `logs` to the Space endpoint `api_name` in SYNC_BATCH_SIZE chunks.

    Prints one progress line per batch (e.g. " Syncing metrics: 500/1200...")
    so long syncs give visible feedback.
    """
    for start in range(0, len(logs), SYNC_BATCH_SIZE):
        batch = logs[start : start + SYNC_BATCH_SIZE]
        print(
            f" Syncing {label}: {min(start + SYNC_BATCH_SIZE, len(logs))}/{len(logs)}..."
        )
        client.predict(api_name=api_name, logs=batch, hf_token=hf_token)


def sync_incremental(
    project: str,
    space_id: str,
    private: bool | None = None,
    pending_only: bool = False,
) -> None:
    """
    Syncs a local Trackio project to a Space via the bulk_log API endpoints
    instead of uploading the entire DB file. Supports incremental sync.

    Args:
        project: The name of the project to sync.
        space_id: The HF Space ID to sync to.
        private: Whether to make the Space private if creating.
        pending_only: If True, only sync rows tagged with space_id (pending data).
    """
    print(
        f"* Syncing project '{project}' to: {SPACE_URL.format(space_id=space_id)} (please wait...)"
    )
    create_space_if_not_exists(space_id, private=private)
    wait_until_space_exists(space_id)

    client = Client(space_id, verbose=False, httpx_kwargs={"timeout": 90})
    hf_token = huggingface_hub.utils.get_token()

    if pending_only:
        # Push only rows queued for this Space; each queue is cleared after a
        # fully successful push so a failed batch is retried on the next sync.
        pending_logs = SQLiteStorage.get_pending_logs(project)
        if pending_logs:
            _push_log_batches(
                client, "/bulk_log", pending_logs["logs"], "metrics", hf_token
            )
            SQLiteStorage.clear_pending_logs(project, pending_logs["ids"])

        pending_sys = SQLiteStorage.get_pending_system_logs(project)
        if pending_sys:
            _push_log_batches(
                client,
                "/bulk_log_system",
                pending_sys["logs"],
                "system metrics",
                hf_token,
            )
            SQLiteStorage.clear_pending_system_logs(project, pending_sys["ids"])

        pending_uploads = SQLiteStorage.get_pending_uploads(project)
        if pending_uploads:
            # Skip media files that were deleted locally since being queued.
            upload_entries = [
                {
                    "project": u["project"],
                    "run": u["run"],
                    "step": u["step"],
                    "relative_path": u["relative_path"],
                    "uploaded_file": handle_file(u["file_path"]),
                }
                for u in pending_uploads["uploads"]
                if os.path.exists(u["file_path"])
            ]
            if upload_entries:
                print(f" Syncing {len(upload_entries)} media files...")
                client.predict(
                    api_name="/bulk_upload_media",
                    uploads=upload_entries,
                    hf_token=hf_token,
                )
            # Clear the queue even when every queued file was missing locally;
            # such entries can never succeed and would otherwise stay pending.
            SQLiteStorage.clear_pending_uploads(project, pending_uploads["ids"])
    else:
        # Full sync: push every row the local DB holds for this project.
        all_logs = SQLiteStorage.get_all_logs_for_sync(project)
        if all_logs:
            _push_log_batches(client, "/bulk_log", all_logs, "metrics", hf_token)

        all_sys_logs = SQLiteStorage.get_all_system_logs_for_sync(project)
        if all_sys_logs:
            _push_log_batches(
                client, "/bulk_log_system", all_sys_logs, "system metrics", hf_token
            )

    # Remember the Space this project syncs to so future calls can omit space_id.
    SQLiteStorage.set_project_metadata(project, "space_id", space_id)
    print(f"* Synced successfully to space: {SPACE_URL.format(space_id=space_id)}")
423
+
424
+
425
def sync(
    project: str,
    space_id: str | None = None,
    private: bool | None = None,
    force: bool = False,
    run_in_background: bool = False,
) -> str:
    """
    Syncs a local Trackio project's database to a Hugging Face Space.
    If the Space does not exist, it will be created.

    Args:
        project (`str`): The name of the project to upload.
        space_id (`str`, *optional*): The ID of the Space to upload to (e.g., `"username/space_id"`).
            If not provided, checks project metadata first, then generates a random space_id.
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
        force (`bool`, *optional*, defaults to `False`):
            If `True`, overwrite the existing database without prompting for confirmation.
            If `False`, prompt the user before overwriting an existing database.
        run_in_background (`bool`, *optional*, defaults to `False`):
            If `True`, the Space creation and database upload will be run in a background thread.
            If `False`, all the steps will be run synchronously.
    Returns:
        `str`: The Space ID of the synced project.
    """
    # Resolve the target Space id: explicit argument, then the id stored in
    # project metadata, then a freshly generated "<project>-<hash>" id.
    target = space_id
    if target is None:
        target = SQLiteStorage.get_space_id(project)
    if target is None:
        target = f"{project}-{get_or_create_project_hash(project)}"
    target, _ = preprocess_space_and_dataset_ids(target, None)

    # NOTE(review): `force` is accepted for interface compatibility but is not
    # consulted on this incremental-sync path — confirm whether that is intended.
    def _run_sync(resolved_space: str, make_private: bool | None = None):
        sync_incremental(project, resolved_space, private=make_private, pending_only=False)

    if run_in_background:
        threading.Thread(target=_run_sync, args=(target, private)).start()
    else:
        _run_sync(target, private)
    return target
trackio/dummy_commit_scheduler.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dummy stand-ins matching the interface of huggingface_hub's CommitScheduler,
# used when no real commit scheduling is needed.
class DummyCommitSchedulerLock:
    """No-op context manager mimicking the scheduler's lock."""

    def __enter__(self):
        # Nothing to acquire; expose no resource.
        return None

    def __exit__(self, exception_type, exception_value, exception_traceback):
        # Nothing to release; returning None does not suppress exceptions.
        return None


class DummyCommitScheduler:
    """Stand-in scheduler exposing only the `.lock` attribute callers use."""

    def __init__(self):
        # Callers synchronize via `with scheduler.lock:`; here that is a no-op.
        self.lock = DummyCommitSchedulerLock()
trackio/frontend/dist/assets/index-0Wf8YNCR.css ADDED
@@ -0,0 +1 @@
 
 
1
+ :root{--primary-50: #fff7ed;--primary-100: #ffedd5;--primary-200: #fed7aa;--primary-300: #fdba74;--primary-400: #fb923c;--primary-500: #f97316;--primary-600: #ea580c;--primary-700: #c2410c;--primary-800: #9a3412;--primary-900: #7c2d12;--primary-950: #6c2e12;--secondary-50: #eff6ff;--secondary-100: #dbeafe;--secondary-200: #bfdbfe;--secondary-300: #93c5fd;--secondary-400: #60a5fa;--secondary-500: #3b82f6;--secondary-600: #2563eb;--secondary-700: #1d4ed8;--secondary-800: #1e40af;--secondary-900: #1e3a8a;--secondary-950: #1d3660;--neutral-50: #f9fafb;--neutral-100: #f3f4f6;--neutral-200: #e5e7eb;--neutral-300: #d1d5db;--neutral-400: #9ca3af;--neutral-500: #6b7280;--neutral-600: #4b5563;--neutral-700: #374151;--neutral-800: #1f2937;--neutral-900: #111827;--neutral-950: #0b0f19;--size-0-5: 2px;--size-1: 4px;--size-2: 8px;--size-3: 12px;--size-4: 16px;--size-5: 20px;--size-6: 24px;--size-8: 32px;--size-14: 56px;--size-16: 64px;--size-28: 112px;--size-full: 100%;--spacing-xxs: 1px;--spacing-xs: 2px;--spacing-sm: 4px;--spacing-md: 6px;--spacing-lg: 8px;--spacing-xl: 10px;--spacing-xxl: 16px;--radius-xxs: 1px;--radius-xs: 2px;--radius-sm: 3px;--radius-md: 4px;--radius-lg: 5px;--radius-xl: 8px;--radius-xxl: 12px;--text-xxs: 9px;--text-xs: 10px;--text-sm: 12px;--text-md: 14px;--text-lg: 16px;--text-xl: 22px;--text-xxl: 26px;--line-sm: 1.4;--background-fill-primary: white;--background-fill-secondary: var(--neutral-50);--body-text-color: var(--neutral-900);--body-text-color-subdued: var(--neutral-600);--border-color-primary: var(--neutral-200);--color-accent: var(--primary-500);--color-accent-soft: var(--primary-50);--shadow-drop: rgba(0, 0, 0, .05) 0px 1px 2px 0px;--shadow-drop-lg: 0 1px 3px 0 rgb(0 0 0 / .1), 0 1px 2px -1px rgb(0 0 0 / .1);--shadow-inset: rgba(0, 0, 0, .05) 0px 2px 4px 0px inset;--shadow-spread: 3px;--block-title-text-color: var(--neutral-500);--block-title-text-size: var(--text-md);--block-title-text-weight: 400;--block-info-text-color: 
var(--body-text-color-subdued);--block-info-text-size: var(--text-sm);--input-background-fill: white;--input-background-fill-focus: var(--primary-500);--input-border-color: var(--border-color-primary);--input-border-color-focus: var(--primary-300);--input-border-width: 1px;--input-padding: var(--spacing-xl);--input-placeholder-color: var(--neutral-400);--input-radius: var(--radius-lg);--input-shadow: 0 0 0 var(--shadow-spread) transparent, var(--shadow-inset);--input-shadow-focus: 0 0 0 var(--shadow-spread) var(--primary-50), var(--shadow-inset);--input-text-size: var(--text-md);--checkbox-background-color: var(--background-fill-primary);--checkbox-background-color-focus: var(--checkbox-background-color);--checkbox-background-color-hover: var(--checkbox-background-color);--checkbox-background-color-selected: var(--primary-600);--checkbox-border-color: var(--neutral-300);--checkbox-border-color-focus: var(--primary-500);--checkbox-border-color-hover: var(--neutral-300);--checkbox-border-color-selected: var(--primary-600);--checkbox-border-radius: var(--radius-sm);--checkbox-border-width: var(--input-border-width);--checkbox-label-gap: var(--spacing-lg);--checkbox-label-padding: var(--spacing-md) calc(2 * var(--spacing-md));--checkbox-label-text-size: var(--text-md);--checkbox-shadow: var(--input-shadow);--checkbox-check: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e");--slider-color: var(--primary-500);--container-radius: var(--radius-lg);--layer-top: 9999}.navbar.svelte-d8j1hi{display:flex;align-items:stretch;border-bottom:1px solid var(--border-color-primary, #e5e7eb);background:var(--background-fill-primary, 
white);padding:0;flex-shrink:0;min-height:44px}.nav-spacer.svelte-d8j1hi{flex:1}.nav-tabs.svelte-d8j1hi{display:flex;gap:0;padding-right:16px}.nav-link.svelte-d8j1hi{padding:10px 16px;border:none;background:none;color:var(--body-text-color-subdued, #6b7280);font-size:var(--text-md, 14px);cursor:pointer;white-space:nowrap;border-bottom:2px solid transparent;transition:color .15s;font-weight:400}.nav-link.svelte-d8j1hi:hover{color:var(--body-text-color, #1f2937)}.nav-link.active.svelte-d8j1hi{color:var(--body-text-color, #1f2937);border-bottom-color:var(--body-text-color, #1f2937);font-weight:500}.checkbox-group.svelte-17gmtkf{display:flex;flex-direction:column}.checkbox-item.svelte-17gmtkf{display:flex;align-items:center;gap:8px;padding:3px 0;cursor:pointer;font-size:13px}.checkbox-item.svelte-17gmtkf input[type=checkbox]:where(.svelte-17gmtkf){-moz-appearance:none;appearance:none;-webkit-appearance:none;width:16px;height:16px;margin:0;border:1px solid var(--checkbox-border-color, #d1d5db);border-radius:var(--checkbox-border-radius, 4px);background-color:var(--checkbox-background-color, white);box-shadow:var(--checkbox-shadow);cursor:pointer;flex-shrink:0;transition:background-color .15s,border-color .15s}.checkbox-item.svelte-17gmtkf input[type=checkbox]:where(.svelte-17gmtkf):checked{background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected, #f97316);border-color:var(--checkbox-border-color-selected, #f97316)}.checkbox-item.svelte-17gmtkf input[type=checkbox]:where(.svelte-17gmtkf):hover{border-color:var(--checkbox-border-color-hover, #d1d5db)}.color-dot.svelte-17gmtkf{width:10px;height:10px;border-radius:50%;flex-shrink:0}.run-name.svelte-17gmtkf{overflow:hidden;text-overflow:ellipsis;white-space:nowrap;color:var(--body-text-color, #1f2937)}.radio-group.svelte-1sinbnb{display:flex;flex-direction:column;max-height:300px;overflow-y:auto;margin-top:8px}.run-row.svelte-1sinbnb{display:flex;align-items:center;gap:8px;padding:3px 
0;cursor:pointer;font-size:13px}.run-row.svelte-1sinbnb input[type=radio]:where(.svelte-1sinbnb){-moz-appearance:none;appearance:none;-webkit-appearance:none;width:16px;height:16px;margin:0;border:1px solid var(--checkbox-border-color, #d1d5db);border-radius:50%;background-color:var(--checkbox-background-color, white);cursor:pointer;flex-shrink:0;transition:border-color .15s,box-shadow .15s}.run-row.svelte-1sinbnb input[type=radio]:where(.svelte-1sinbnb):checked{border-color:var(--checkbox-border-color-selected, #f97316);box-shadow:inset 0 0 0 3px var(--checkbox-background-color-selected, #f97316)}.color-dot.svelte-1sinbnb{width:10px;height:10px;border-radius:50%;flex-shrink:0}.run-name.svelte-1sinbnb{overflow:hidden;text-overflow:ellipsis;white-space:nowrap;color:var(--body-text-color, #1f2937)}.dropdown-container.svelte-kgylqb{width:100%}.label.svelte-kgylqb{display:block;font-size:13px;font-weight:500;color:var(--body-text-color-subdued, #6b7280);margin-bottom:6px}.info.svelte-kgylqb{display:block;font-size:12px;color:var(--body-text-color-subdued, #9ca3af);margin-bottom:4px}.wrap.svelte-kgylqb{position:relative;border-radius:var(--input-radius, 8px);background:var(--input-background-fill, white);border:1px solid var(--border-color-primary, #e5e7eb);transition:border-color .15s,box-shadow .15s}.wrap.focused.svelte-kgylqb{border-color:var(--input-border-color-focus, #fdba74);box-shadow:0 0 0 2px var(--primary-50, #fff7ed)}.wrap-inner.svelte-kgylqb{display:flex;position:relative;align-items:center;padding:0 10px}.secondary-wrap.svelte-kgylqb{display:flex;flex:1;align-items:center}input.svelte-kgylqb{margin:0;outline:none;border:none;background:inherit;width:100%;color:var(--body-text-color, #1f2937);font-size:13px;font-family:inherit;padding:7px 0}input.svelte-kgylqb::placeholder{color:var(--input-placeholder-color, #9ca3af)}input[readonly].svelte-kgylqb{cursor:pointer}.icon-wrap.svelte-kgylqb{color:var(--body-text-color-subdued, 
#9ca3af);width:16px;flex-shrink:0;pointer-events:none}.options.svelte-kgylqb{position:fixed;z-index:var(--layer-top, 9999);margin:0;padding:4px 0;box-shadow:0 4px 12px #0000001f;border-radius:var(--input-radius, 8px);border:1px solid var(--border-color-primary, #e5e7eb);background:var(--background-fill-primary, white);min-width:fit-content;overflow:auto;color:var(--body-text-color, #1f2937);list-style:none}.item.svelte-kgylqb{display:flex;cursor:pointer;padding:6px 10px;font-size:13px;word-break:break-word}.item.svelte-kgylqb:hover,.item.active.svelte-kgylqb{background:var(--background-fill-secondary, #f9fafb)}.item.selected.svelte-kgylqb{font-weight:500}.check-mark.svelte-kgylqb{padding-right:6px;min-width:16px;font-size:12px}.check-mark.hide.svelte-kgylqb{visibility:hidden}.checkbox-container.svelte-oj84db{display:flex;align-items:center;gap:8px;cursor:pointer;margin:8px 0}.label-text.svelte-oj84db{color:var(--body-text-color, #1f2937);font-size:13px;line-height:1.4}input[type=checkbox].svelte-oj84db{--ring-color: transparent;position:relative;-moz-appearance:none;appearance:none;-webkit-appearance:none;width:16px;height:16px;box-shadow:var(--checkbox-shadow);border:1px solid var(--checkbox-border-color, #d1d5db);border-radius:var(--checkbox-border-radius, 4px);background-color:var(--checkbox-background-color, white);flex-shrink:0;cursor:pointer;transition:background-color .15s,border-color .15s}input[type=checkbox].svelte-oj84db:checked,input[type=checkbox].svelte-oj84db:checked:hover,input[type=checkbox].svelte-oj84db:checked:focus{background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected, #f97316);border-color:var(--checkbox-border-color-selected, #f97316)}input[type=checkbox].svelte-oj84db:hover{border-color:var(--checkbox-border-color-hover, #d1d5db);background-color:var(--checkbox-background-color-hover, white)}input[type=checkbox].svelte-oj84db:focus{border-color:var(--checkbox-border-color-focus, 
#f97316);background-color:var(--checkbox-background-color-focus, white);outline:none}.slider-wrap.svelte-wei6ev{display:flex;flex-direction:column;width:100%}.head.svelte-wei6ev{margin-bottom:4px;display:flex;justify-content:space-between;align-items:center;width:100%}.label.svelte-wei6ev{flex:1;font-size:13px;font-weight:500;color:var(--body-text-color-subdued, #6b7280)}.info.svelte-wei6ev{display:block;font-size:12px;color:var(--body-text-color-subdued, #9ca3af);margin-bottom:4px}.slider-input-container.svelte-wei6ev{display:flex;align-items:center;gap:6px}input[type=range].svelte-wei6ev{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:100%;cursor:pointer;outline:none;border-radius:var(--radius-xl, 12px);min-width:var(--size-28, 112px);background:transparent}input[type=range].svelte-wei6ev::-webkit-slider-runnable-track{height:6px;border-radius:var(--radius-xl, 12px);background:linear-gradient(to right,var(--slider-color, #f97316) var(--range_progress, 50%),var(--neutral-200, #e5e7eb) var(--range_progress, 50%))}input[type=range].svelte-wei6ev::-webkit-slider-thumb{-webkit-appearance:none;-moz-appearance:none;appearance:none;height:16px;width:16px;background-color:var(--slider-color, #f97316);border:2px solid var(--background-fill-primary, white);border-radius:50%;margin-top:-5px;box-shadow:0 0 0 1px var(--border-color-primary, rgba(0, 0, 0, .08)),0 1px 3px #0003}input[type=range].svelte-wei6ev::-moz-range-track{height:6px;background:var(--neutral-200, #e5e7eb);border-radius:var(--radius-xl, 12px)}input[type=range].svelte-wei6ev::-moz-range-thumb{-webkit-appearance:none;-moz-appearance:none;appearance:none;height:16px;width:16px;background-color:var(--slider-color, #f97316);border:2px solid var(--background-fill-primary, white);border-radius:50%;box-shadow:0 0 0 1px var(--border-color-primary, rgba(0, 0, 0, .08)),0 1px 3px #0003}input[type=range].svelte-wei6ev::-moz-range-progress{height:6px;background-color:var(--slider-color, 
#f97316);border-radius:var(--radius-xl, 12px)}.bound.svelte-wei6ev{font-size:11px;color:var(--body-text-color-subdued, #9ca3af);min-width:12px;text-align:center}.textbox-container.svelte-6yncpg{width:100%}.label.svelte-6yncpg{display:block;font-size:13px;font-weight:500;color:var(--body-text-color-subdued, #6b7280);margin-bottom:6px}.info.svelte-6yncpg{display:block;font-size:12px;color:var(--body-text-color-subdued, #9ca3af);margin-bottom:4px}.input-wrap.svelte-6yncpg{border-radius:var(--input-radius, 8px);background:var(--input-background-fill, white);border:1px solid var(--border-color-primary, #e5e7eb);transition:border-color .15s,box-shadow .15s}.input-wrap.svelte-6yncpg:focus-within{border-color:var(--input-border-color-focus, #fdba74);box-shadow:0 0 0 2px var(--primary-50, #fff7ed)}input.svelte-6yncpg{width:100%;padding:7px 10px;outline:none;border:none;background:transparent;color:var(--body-text-color, #1f2937);font-size:13px;font-family:inherit;border-radius:var(--input-radius, 8px)}input.svelte-6yncpg::placeholder{color:var(--input-placeholder-color, #9ca3af)}.sidebar.svelte-181dlmc{width:290px;min-width:290px;background:var(--background-fill-primary, white);border-right:1px solid var(--border-color-primary, #e5e7eb);display:flex;flex-direction:column;position:relative;overflow:hidden;transition:width .2s,min-width .2s}.sidebar.collapsed.svelte-181dlmc{width:40px;min-width:40px}.toggle-btn.svelte-181dlmc{position:absolute;top:12px;right:8px;z-index:10;border:none;background:none;color:var(--body-text-color-subdued, #9ca3af);cursor:pointer;padding:4px;display:flex;align-items:center;justify-content:center;border-radius:var(--radius-sm, 4px);transition:color .15s,background-color .15s}.toggle-btn.svelte-181dlmc:hover{color:var(--body-text-color, #1f2937);background-color:var(--background-fill-secondary, 
#f9fafb)}.sidebar-content.svelte-181dlmc{padding:16px;flex:1;min-height:0;display:flex;flex-direction:column}.sidebar-scroll.svelte-181dlmc{overflow-y:auto;flex:1;min-height:0}.oauth-footer.svelte-181dlmc{flex-shrink:0;margin-top:12px;padding-top:12px;border-top:1px solid var(--border-color-primary, #e5e7eb)}.oauth-line.svelte-181dlmc{margin:0;font-size:12px;line-height:1.4;color:var(--body-text-color-subdued, #6b7280)}.oauth-warn.svelte-181dlmc{color:var(--body-text-color, #92400e)}.hf-login-btn.svelte-181dlmc{display:inline-flex;align-items:center;justify-content:center;gap:8px;width:100%;padding:8px 12px;font-size:13px;font-weight:600;color:#fff;background:#141c2e;border-radius:var(--radius-lg, 8px);text-decoration:none;border:none;cursor:pointer;box-sizing:border-box}.hf-login-btn.svelte-181dlmc:hover{background:#283042}.hf-logo.svelte-181dlmc{width:20px;height:20px;flex-shrink:0}.oauth-hint.svelte-181dlmc{margin:8px 0 0;font-size:11px;line-height:1.35;color:var(--body-text-color-subdued, #9ca3af)}.oauth-signed-in.svelte-181dlmc{margin:0;font-size:12px;color:var(--body-text-color-subdued, #6b7280)}.oauth-logout.svelte-181dlmc{font-size:12px;color:var(--body-text-color-subdued, #9ca3af);text-decoration:none;cursor:pointer}.oauth-logout.svelte-181dlmc:hover{text-decoration:underline;color:var(--body-text-color, #1f2937)}.logo-section.svelte-181dlmc{margin-bottom:20px}.logo.svelte-181dlmc{width:80%;max-width:200px}.section.svelte-181dlmc{margin-bottom:18px}.section-label.svelte-181dlmc{font-size:13px;font-weight:500;color:var(--body-text-color-subdued, #6b7280)}.locked-project.svelte-181dlmc{margin-top:4px;font-size:13px;font-weight:500;color:var(--body-text-color, #1f2937);padding:8px 10px;border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-md, 6px);background:var(--background-fill-secondary, 
#f9fafb)}.runs-header.svelte-181dlmc{display:flex;align-items:center;justify-content:space-between;margin-bottom:6px}.latest-toggle.svelte-181dlmc{display:flex;align-items:center;gap:6px;font-size:12px;color:var(--body-text-color-subdued, #6b7280);cursor:pointer}.latest-toggle.svelte-181dlmc input[type=checkbox]:where(.svelte-181dlmc){-moz-appearance:none;appearance:none;-webkit-appearance:none;width:16px;height:16px;margin:0;border:1px solid var(--checkbox-border-color, #d1d5db);border-radius:var(--checkbox-border-radius, 4px);background-color:var(--checkbox-background-color, white);box-shadow:var(--checkbox-shadow);cursor:pointer;flex-shrink:0;transition:background-color .15s,border-color .15s}.latest-toggle.svelte-181dlmc input[type=checkbox]:where(.svelte-181dlmc):checked{background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected, #f97316);border-color:var(--checkbox-border-color-selected, #f97316)}.checkbox-list.svelte-181dlmc{max-height:300px;overflow-y:auto;margin-top:8px}.alert-panel.svelte-x5aqew{position:fixed;bottom:16px;right:16px;width:380px;max-height:400px;background:var(--background-fill-primary, white);border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px);box-shadow:var(--shadow-drop-lg);z-index:1000;overflow:hidden;display:flex;flex-direction:column}.alert-header.svelte-x5aqew{padding:10px 12px;border-bottom:1px solid var(--border-color-primary, #e5e7eb);display:flex;align-items:center;justify-content:space-between}.alert-title.svelte-x5aqew{font-size:13px;font-weight:600;color:var(--body-text-color, #1f2937)}.filter-pills.svelte-x5aqew{display:flex;gap:4px}.pill.svelte-x5aqew{border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-xxl, 22px);padding:2px 8px;font-size:11px;background:var(--background-fill-secondary, #f9fafb);color:var(--body-text-color-subdued, #6b7280);cursor:pointer}.pill.active.svelte-x5aqew{background:var(--color-accent, 
#f97316);color:#fff;border-color:var(--color-accent, #f97316)}.alert-list.svelte-x5aqew{overflow-y:auto;flex:1}.alert-item.svelte-x5aqew{border-bottom:1px solid var(--neutral-100, #f3f4f6)}.alert-row.svelte-x5aqew{display:flex;align-items:center;gap:8px;width:100%;padding:8px 12px;border:none;background:none;text-align:left;cursor:pointer;font-size:var(--text-sm, 12px)}.alert-row.svelte-x5aqew:hover{background:var(--background-fill-secondary, #f9fafb)}.alert-text.svelte-x5aqew{flex:1;color:var(--body-text-color, #1f2937)}.alert-meta.svelte-x5aqew{font-size:var(--text-xs, 10px);color:var(--body-text-color-subdued, #9ca3af);white-space:nowrap}.alert-detail.svelte-x5aqew{padding:4px 12px 8px 32px;font-size:var(--text-sm, 12px);color:var(--body-text-color-subdued, #6b7280)}.plot-container.svelte-9thu1j{min-width:350px;flex:1;background:var(--background-fill-primary, white);border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px);padding:12px;overflow:hidden;position:relative}.plot-container[draggable=true].svelte-9thu1j{cursor:grab}.plot-container[draggable=true].svelte-9thu1j:active{cursor:grabbing}.hidden-plot.svelte-9thu1j{visibility:hidden;height:0;padding:0;margin:0;border:none;overflow:hidden;pointer-events:none}.drag-handle.svelte-9thu1j{position:absolute;top:8px;left:8px;color:var(--body-text-color-subdued, #9ca3af);opacity:0;transition:opacity .15s;z-index:5}.plot-container.svelte-9thu1j:hover .drag-handle:where(.svelte-9thu1j){opacity:.5}.drag-handle.svelte-9thu1j:hover{opacity:1!important}.plot-toolbar.svelte-9thu1j{position:absolute;top:8px;right:8px;display:flex;gap:4px;z-index:5;opacity:0;transition:opacity .15s}.plot-container.svelte-9thu1j:hover .plot-toolbar:where(.svelte-9thu1j){opacity:1}.toolbar-btn.svelte-9thu1j{border:1px solid var(--border-color-primary, #e5e7eb);background:var(--background-fill-primary, white);color:var(--body-text-color-subdued, #6b7280);cursor:pointer;padding:4px 
6px;border-radius:var(--radius-sm, 4px);display:flex;align-items:center;justify-content:center}.toolbar-btn.svelte-9thu1j:hover{background:var(--neutral-100, #f3f4f6);color:var(--body-text-color, #1f2937)}.plot-chart-wrap.svelte-9thu1j{position:relative;width:100%}.plot-chart-wrap--fs.svelte-9thu1j{flex:1;min-height:0;display:flex;flex-direction:column}.reset-zoom-btn.svelte-9thu1j{position:absolute;bottom:1px;right:1px;z-index:6;display:inline-flex;align-items:center;justify-content:center;margin:0;min-width:52px;padding:5px 12px 5px 10px;border:none;border-radius:4px;background:transparent;color:var(--body-text-color-subdued, #334155);cursor:pointer;opacity:.92;transform:translateY(6px);transition:opacity .15s ease,color .15s ease,background .15s ease;box-shadow:none}.reset-zoom-btn.svelte-9thu1j:hover{opacity:1;color:var(--body-text-color, #0f172a);background:var(--background-fill-secondary, rgba(226, 232, 240, .85));transform:translateY(6px)}.reset-zoom-btn.svelte-9thu1j svg:where(.svelte-9thu1j){display:block;flex-shrink:0;filter:drop-shadow(0 0 .5px rgba(255,255,255,.95))}.plot.svelte-9thu1j{width:100%}.plot.svelte-9thu1j .vega-embed{width:100%!important}.plot.svelte-9thu1j .vega-embed summary{display:none}.fullscreen-host.svelte-9thu1j{position:fixed;top:0;right:0;bottom:0;left:0;z-index:10000;box-sizing:border-box;display:flex;flex-direction:column;background:var(--background-fill-primary, white);padding:12px;gap:8px;pointer-events:auto}.fullscreen-host.svelte-9thu1j:fullscreen{width:100%;height:100%}.fullscreen-host.svelte-9thu1j:-webkit-full-screen{width:100%;height:100%}.fullscreen-toolbar.svelte-9thu1j{flex-shrink:0;display:flex;justify-content:flex-end;gap:4px;z-index:5}.fullscreen-chart-wrap.svelte-9thu1j{flex:1;min-height:0;display:flex;flex-direction:column}.fullscreen-legend.svelte-9thu1j{flex-shrink:0}.fullscreen-plot.svelte-9thu1j{flex:1;min-height:0;width:100%;overflow:hidden}.fullscreen-plot.svelte-9thu1j 
.vega-embed{width:100%!important;height:100%!important;min-height:0;display:flex;flex-direction:column}.fullscreen-plot.svelte-9thu1j .vega-embed .vega-view{flex:1;min-height:0}.fullscreen-plot.svelte-9thu1j .vega-embed summary{display:none}.custom-legend.svelte-9thu1j{display:flex;align-items:center;justify-content:center;gap:12px;padding:6px 0 0;flex-wrap:wrap}.legend-title.svelte-9thu1j{font-size:11px;color:var(--body-text-color-subdued, #6b7280);font-weight:600}.legend-item.svelte-9thu1j{display:flex;align-items:center;gap:4px}.legend-dot.svelte-9thu1j{width:10px;height:10px;border-radius:50%;flex-shrink:0}.legend-label.svelte-9thu1j{font-size:11px;color:var(--body-text-color-subdued, #6b7280)}.accordion.svelte-1jep0a{margin-bottom:12px;border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px);background:var(--background-fill-primary, white);overflow:hidden}.accordion-hidden.svelte-1jep0a{margin-bottom:8px}.accordion-header.svelte-1jep0a{display:flex;align-items:center;gap:8px;width:100%;padding:10px 14px;border:none;background:var(--background-fill-primary, white);color:var(--body-text-color, #1f2937);font-size:var(--text-md, 14px);font-weight:600;cursor:pointer;text-align:left}.accordion-header.svelte-1jep0a:hover{background:var(--background-fill-secondary, #f9fafb)}.arrow.svelte-1jep0a{font-size:14px;transition:transform .15s;color:var(--body-text-color, #1f2937);display:inline-block}.arrow.svelte-1jep0a:not(.rotated){transform:rotate(-90deg)}.accordion-body.svelte-1jep0a{padding:0 14px 14px}.trackio-loading.svelte-1kc6b2l{display:flex;align-items:center;justify-content:center;width:100%;min-height:min(70vh,640px);padding:32px 
24px;box-sizing:border-box;background:transparent}.logo-stack.svelte-1kc6b2l{position:relative;width:min(100%,200px);max-width:min(92vw,200px);line-height:0;background:transparent;isolation:isolate}.logo-base.svelte-1kc6b2l{display:block;background:transparent}.logo-img.svelte-1kc6b2l{width:100%;height:auto;display:block;background:transparent}.logo-overlay.svelte-1kc6b2l{position:absolute;left:0;top:0;width:100%;animation:svelte-1kc6b2l-trackio-logo-sweep 4s linear infinite;pointer-events:none;background:transparent}.logo-overlay.svelte-1kc6b2l .logo-img:where(.svelte-1kc6b2l){width:100%;height:auto;object-position:left center}.logo-img--gray.svelte-1kc6b2l{filter:grayscale(1)}@keyframes svelte-1kc6b2l-trackio-logo-sweep{0%{clip-path:inset(0 0 0 0)}50%{clip-path:inset(0 0 0 100%)}to{clip-path:inset(0 0 0 0)}}@media(prefers-reduced-motion:reduce){.logo-overlay.svelte-1kc6b2l{display:none}}.sr-only.svelte-1kc6b2l{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}.metrics-page.svelte-2bul55{padding:20px 24px;overflow-y:auto;flex:1;min-height:0}.plot-grid.svelte-2bul55{display:flex;flex-wrap:wrap;gap:16px}.empty-state.svelte-2bul55{max-width:640px;padding:40px 24px;color:var(--body-text-color, #1f2937)}.empty-state.svelte-2bul55 h2:where(.svelte-2bul55){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-2bul55 p:where(.svelte-2bul55){margin:12px 0 8px;color:var(--body-text-color-subdued, #6b7280)}.empty-state.svelte-2bul55 pre:where(.svelte-2bul55){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-2bul55 code:where(.svelte-2bul55){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-2bul55 pre:where(.svelte-2bul55) 
code:where(.svelte-2bul55){background:none;padding:0}.system-page.svelte-nv5os4{padding:20px 24px;overflow-y:auto;flex:1;min-height:0}.plot-grid.svelte-nv5os4{display:flex;flex-wrap:wrap;gap:12px}.empty-state.svelte-nv5os4{max-width:640px;padding:40px 24px;color:var(--body-text-color, #1f2937)}.empty-state.svelte-nv5os4 h2:where(.svelte-nv5os4){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-nv5os4 p:where(.svelte-nv5os4){margin:12px 0 8px;color:var(--body-text-color-subdued, #6b7280)}.empty-state.svelte-nv5os4 pre:where(.svelte-nv5os4){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-nv5os4 ul:where(.svelte-nv5os4){list-style:disc;padding-left:20px;margin:4px 0 0}.empty-state.svelte-nv5os4 li:where(.svelte-nv5os4){margin:4px 0;color:var(--body-text-color, #1f2937)}.empty-state.svelte-nv5os4 code:where(.svelte-nv5os4){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-nv5os4 pre:where(.svelte-nv5os4) code:where(.svelte-nv5os4){background:none;padding:0}.table-container.svelte-1cp60rw{display:flex;flex-direction:column;gap:var(--size-2, 8px);position:relative}.header-row.svelte-1cp60rw{display:flex;justify-content:flex-end;align-items:center;min-height:var(--size-6, 24px);width:100%}.header-row.svelte-1cp60rw .label:where(.svelte-1cp60rw){flex:1;margin:0;color:var(--block-label-text-color, var(--neutral-500, #6b7280));font-size:var(--block-label-text-size, 12px);line-height:var(--line-sm, 1.4)}.table-wrap.svelte-1cp60rw{position:relative;overflow:auto;border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--table-radius, var(--radius-lg, 8px))}table.svelte-1cp60rw{width:100%;table-layout:auto;color:var(--body-text-color, #1f2937);font-size:var(--input-text-size, 
14px);line-height:var(--line-sm, 1.4);border-spacing:0;border-collapse:separate}thead.svelte-1cp60rw{position:sticky;top:0;z-index:5;box-shadow:var(--shadow-drop, rgba(0,0,0,.05) 0px 1px 2px 0px)}th.svelte-1cp60rw{padding:0;background:var(--table-even-background-fill, white);border-right-width:0px;border-left-width:1px;border-bottom-width:1px;border-style:solid;border-color:var(--border-color-primary, #e5e7eb);text-align:left;cursor:pointer;-webkit-user-select:none;user-select:none}th.first.svelte-1cp60rw{border-left-width:0;border-top-left-radius:var(--table-radius, var(--radius-lg, 8px))}th.last.svelte-1cp60rw{border-top-right-radius:var(--table-radius, var(--radius-lg, 8px))}.th-inner.svelte-1cp60rw{padding:var(--size-2, 8px);display:flex;align-items:center;gap:4px;font-weight:600;font-size:var(--text-sm, 12px);white-space:nowrap}.sort-arrow.svelte-1cp60rw{font-size:10px;color:var(--body-text-color-subdued, #9ca3af)}td.svelte-1cp60rw{padding:var(--size-2, 8px);border-right-width:0px;border-left-width:1px;border-bottom-width:1px;border-style:solid;border-color:var(--border-color-primary, #e5e7eb);font-size:var(--text-sm, 12px)}td.first.svelte-1cp60rw{border-left-width:0}tr.svelte-1cp60rw{background:var(--table-even-background-fill, white);border-bottom:1px solid var(--border-color-primary, #e5e7eb);text-align:left}tr.row-odd.svelte-1cp60rw{background:var(--table-odd-background-fill, var(--neutral-50, #f9fafb))}tr.selected.svelte-1cp60rw{background:var(--color-accent-soft, var(--primary-50, #fff7ed))}tr.svelte-1cp60rw:last-child td.first:where(.svelte-1cp60rw){border-bottom-left-radius:var(--table-radius, var(--radius-lg, 8px))}tr.svelte-1cp60rw:last-child td.last:where(.svelte-1cp60rw){border-bottom-right-radius:var(--table-radius, var(--radius-lg, 8px))}.check-col.svelte-1cp60rw{width:40px;text-align:center;padding:var(--size-2, 8px);border-left-width:0}.check-col.svelte-1cp60rw 
input[type=checkbox]:where(.svelte-1cp60rw){-moz-appearance:none;appearance:none;-webkit-appearance:none;width:16px;height:16px;border:1px solid var(--checkbox-border-color, #d1d5db);border-radius:var(--checkbox-border-radius, 4px);background-color:var(--checkbox-background-color, white);cursor:pointer;flex-shrink:0;transition:background-color .15s,border-color .15s}.check-col.svelte-1cp60rw input[type=checkbox]:where(.svelte-1cp60rw):checked{background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected, #2563eb);border-color:var(--checkbox-border-color-selected, #2563eb)}.media-page.svelte-outb32{padding:20px 24px;overflow-y:auto;flex:1}.section-title.svelte-outb32{font-size:var(--text-lg, 16px);font-weight:600;color:var(--body-text-color, #1f2937);margin:16px 0 8px}.gallery.svelte-outb32{display:grid;grid-template-columns:repeat(auto-fill,minmax(200px,1fr));gap:12px}.gallery-item.svelte-outb32{border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px);overflow:hidden;background:var(--background-fill-secondary, #f9fafb)}.gallery-item.svelte-outb32 img:where(.svelte-outb32),.gallery-item.svelte-outb32 video:where(.svelte-outb32){width:100%;display:block}.caption.svelte-outb32{padding:4px 8px;font-size:var(--text-sm, 12px);color:var(--body-text-color-subdued, #9ca3af)}.step-label.svelte-outb32{padding:4px 8px;font-size:var(--text-xs, 10px);color:var(--body-text-color-subdued, #9ca3af)}.audio-list.svelte-outb32{display:flex;flex-direction:column;gap:8px}.audio-item.svelte-outb32{display:flex;align-items:center;gap:12px;padding:8px;border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px)}.audio-label.svelte-outb32{font-size:var(--text-sm, 12px);color:var(--body-text-color-subdued, #9ca3af);min-width:120px}.table-section.svelte-outb32{margin-bottom:16px}.empty-state.svelte-outb32{max-width:640px;padding:40px 24px;color:var(--body-text-color, 
#1f2937)}.empty-state.svelte-outb32 h2:where(.svelte-outb32){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-outb32 p:where(.svelte-outb32){margin:12px 0 8px;color:var(--body-text-color-subdued, #6b7280)}.empty-state.svelte-outb32 pre:where(.svelte-outb32){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-outb32 code:where(.svelte-outb32){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-outb32 pre:where(.svelte-outb32) code:where(.svelte-outb32){background:none;padding:0}.reports-page.svelte-iufsej{padding:20px 24px;overflow-y:auto;flex:1}.controls.svelte-iufsej{display:flex;gap:16px;margin-bottom:16px;flex-wrap:wrap;align-items:flex-end}.control.svelte-iufsej{min-width:200px}.block-title.svelte-iufsej{display:block;font-size:var(--block-title-text-size, 14px);font-weight:var(--block-title-text-weight, 400);color:var(--block-title-text-color, #6b7280);margin-bottom:var(--spacing-lg, 8px)}.filter-pills.svelte-iufsej{display:flex;gap:4px}.pill.svelte-iufsej{border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-xxl, 22px);padding:4px 12px;font-size:var(--text-sm, 12px);background:var(--background-fill-secondary, #f9fafb);color:var(--body-text-color-subdued, #6b7280);cursor:pointer;transition:background-color .15s,color .15s}.pill.svelte-iufsej:hover{background:var(--neutral-100, #f3f4f6)}.pill.active.svelte-iufsej{background:var(--color-accent, #f97316);color:#fff;border-color:var(--color-accent, #f97316)}.empty-state.svelte-iufsej{max-width:640px;padding:40px 24px;color:var(--body-text-color, #1f2937)}.empty-state.svelte-iufsej h2:where(.svelte-iufsej){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-iufsej p:where(.svelte-iufsej){margin:12px 0 
8px;color:var(--body-text-color-subdued, #6b7280)}.empty-state.svelte-iufsej pre:where(.svelte-iufsej){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-iufsej code:where(.svelte-iufsej){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-iufsej pre:where(.svelte-iufsej) code:where(.svelte-iufsej){background:none;padding:0}.alerts-table.svelte-iufsej{width:100%;border-collapse:collapse;font-size:var(--text-md, 14px)}.alerts-table.svelte-iufsej th:where(.svelte-iufsej){text-align:left;padding:8px 12px;border-bottom:2px solid var(--border-color-primary, #e5e7eb);color:var(--body-text-color-subdued, #6b7280);font-weight:600;font-size:var(--text-sm, 12px);text-transform:uppercase;letter-spacing:.05em}.alerts-table.svelte-iufsej td:where(.svelte-iufsej){padding:8px 12px;border-bottom:1px solid var(--border-color-primary, #e5e7eb);color:var(--body-text-color, #1f2937)}.alerts-table.svelte-iufsej tbody:where(.svelte-iufsej) tr:where(.svelte-iufsej):nth-child(odd){background:var(--table-odd-background-fill, var(--background-fill-primary, white))}.alerts-table.svelte-iufsej tbody:where(.svelte-iufsej) tr:where(.svelte-iufsej):nth-child(2n){background:var(--table-even-background-fill, var(--background-fill-secondary, #f9fafb))}.alerts-table.svelte-iufsej tr:where(.svelte-iufsej):hover{background:var(--background-fill-secondary, #f3f4f6)}.runs-page.svelte-1yb6d54{padding:20px 24px;overflow-y:auto;flex:1}.empty-state.svelte-1yb6d54{max-width:640px;padding:40px 24px;color:var(--body-text-color, #1f2937)}.empty-state.svelte-1yb6d54 h2:where(.svelte-1yb6d54){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-1yb6d54 p:where(.svelte-1yb6d54){margin:12px 0 8px;color:var(--body-text-color-subdued, 
#6b7280)}.empty-state.svelte-1yb6d54 pre:where(.svelte-1yb6d54){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-1yb6d54 code:where(.svelte-1yb6d54){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-1yb6d54 pre:where(.svelte-1yb6d54) code:where(.svelte-1yb6d54){background:none;padding:0}.runs-table.svelte-1yb6d54{width:100%;border-collapse:collapse;font-size:var(--text-md, 14px)}.runs-table.svelte-1yb6d54 th:where(.svelte-1yb6d54){text-align:left;padding:8px 12px;border-bottom:2px solid var(--border-color-primary, #e5e7eb);color:var(--body-text-color-subdued, #6b7280);font-weight:600;font-size:var(--text-sm, 12px);text-transform:uppercase;letter-spacing:.05em}.runs-table.svelte-1yb6d54 td:where(.svelte-1yb6d54){padding:8px 12px;border-bottom:1px solid var(--border-color-primary, #e5e7eb);color:var(--body-text-color, #1f2937)}.runs-table.svelte-1yb6d54 tbody:where(.svelte-1yb6d54) tr:where(.svelte-1yb6d54):nth-child(odd){background:var(--table-odd-background-fill, var(--background-fill-primary, white))}.runs-table.svelte-1yb6d54 tbody:where(.svelte-1yb6d54) tr:where(.svelte-1yb6d54):nth-child(2n){background:var(--table-even-background-fill, var(--background-fill-secondary, #f9fafb))}.runs-table.svelte-1yb6d54 tr:where(.svelte-1yb6d54):hover{background:var(--background-fill-secondary, #f3f4f6)}.run-name-cell.svelte-1yb6d54{font-weight:500}.run-name-with-dot.svelte-1yb6d54{display:inline-flex;align-items:center;gap:8px;max-width:100%}.run-dot.svelte-1yb6d54{width:10px;height:10px;border-radius:50%;flex-shrink:0}.link-btn.svelte-1yb6d54{background:none;border:none;color:var(--color-accent, 
#f97316);cursor:pointer;font:inherit;font-weight:500;padding:0;text-align:left}.link-btn.svelte-1yb6d54:hover{text-decoration:underline}.rename-input.svelte-1yb6d54{font:inherit;padding:2px 6px;border:1px solid var(--color-accent, #f97316);border-radius:var(--radius-sm, 4px);outline:none;width:100%}.actions-cell.svelte-1yb6d54{display:flex;gap:4px}.action-btn.svelte-1yb6d54{background:none;border:1px solid transparent;color:var(--body-text-color-subdued, #6b7280);cursor:pointer;padding:4px;border-radius:var(--radius-sm, 4px);display:flex;align-items:center}.action-btn.svelte-1yb6d54:hover{background:var(--background-fill-secondary, #f9fafb);border-color:var(--border-color-primary, #e5e7eb);color:var(--body-text-color, #1f2937)}.delete-btn.svelte-1yb6d54:hover{color:#dc2626;border-color:#fecaca;background:#fef2f2}.action-btn.svelte-1yb6d54:disabled{opacity:.45;cursor:not-allowed;pointer-events:none}.run-detail-page.svelte-1bpgsx2{padding:20px 24px;overflow-y:auto;flex:1}.detail-card.svelte-1bpgsx2{background:var(--background-fill-primary, white);border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px);padding:24px;max-width:800px}.detail-card.svelte-1bpgsx2 h2:where(.svelte-1bpgsx2){color:var(--body-text-color, #1f2937);margin:0 0 16px;font-size:var(--text-xl, 22px)}.detail-card.svelte-1bpgsx2 h3:where(.svelte-1bpgsx2){color:var(--body-text-color, #1f2937);margin:20px 0 8px;font-size:var(--text-lg, 16px)}.detail-grid.svelte-1bpgsx2{display:grid;grid-template-columns:repeat(auto-fill,minmax(200px,1fr));gap:12px}.detail-item.svelte-1bpgsx2{display:flex;flex-direction:column;gap:2px}.detail-label.svelte-1bpgsx2{font-size:var(--text-xs, 10px);font-weight:600;color:var(--body-text-color-subdued, #9ca3af);text-transform:uppercase}.detail-value.svelte-1bpgsx2{font-size:var(--text-md, 14px);color:var(--body-text-color, #1f2937)}.config-block.svelte-1bpgsx2{background:var(--background-fill-secondary, 
#f9fafb);padding:12px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:var(--text-sm, 12px);color:var(--body-text-color, #1f2937);overflow-x:auto}.empty-state.svelte-1bpgsx2{max-width:640px;padding:40px 24px;color:var(--body-text-color, #1f2937)}.empty-state.svelte-1bpgsx2 h2:where(.svelte-1bpgsx2){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-1bpgsx2 p:where(.svelte-1bpgsx2){margin:12px 0 8px;color:var(--body-text-color-subdued, #6b7280)}.empty-state.svelte-1bpgsx2 pre:where(.svelte-1bpgsx2){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-1bpgsx2 code:where(.svelte-1bpgsx2){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-1bpgsx2 pre:where(.svelte-1bpgsx2) code:where(.svelte-1bpgsx2){background:none;padding:0}.files-page.svelte-1xvfk9n{padding:20px 24px;overflow-y:auto;flex:1}h2.svelte-1xvfk9n{color:var(--body-text-color, #1f2937);font-size:var(--text-xl, 22px);margin-bottom:16px}.file-list.svelte-1xvfk9n{list-style:none;padding:0}.file-list.svelte-1xvfk9n li:where(.svelte-1xvfk9n){padding:8px 12px;border:1px solid var(--border-color-primary, #e5e7eb);border-radius:var(--radius-lg, 8px);margin-bottom:4px}.file-list.svelte-1xvfk9n a:where(.svelte-1xvfk9n){color:var(--secondary-600, #2563eb);text-decoration:none;font-size:var(--text-md, 14px)}.file-list.svelte-1xvfk9n a:where(.svelte-1xvfk9n):hover{text-decoration:underline}.empty-state.svelte-1xvfk9n{max-width:640px;padding:40px 24px;color:var(--body-text-color, #1f2937)}.empty-state.svelte-1xvfk9n h2:where(.svelte-1xvfk9n){margin:0 0 8px;font-size:20px;font-weight:700}.empty-state.svelte-1xvfk9n p:where(.svelte-1xvfk9n){margin:12px 0 8px;color:var(--body-text-color-subdued, 
#6b7280)}.empty-state.svelte-1xvfk9n pre:where(.svelte-1xvfk9n){background:var(--background-fill-secondary, #f9fafb);padding:16px;border-radius:var(--radius-lg, 8px);border:1px solid var(--border-color-primary, #e5e7eb);font-size:13px;overflow-x:auto}.empty-state.svelte-1xvfk9n code:where(.svelte-1xvfk9n){background:var(--background-fill-secondary, #f0f0f0);padding:1px 5px;border-radius:var(--radius-sm, 4px);font-size:13px}.empty-state.svelte-1xvfk9n pre:where(.svelte-1xvfk9n) code:where(.svelte-1xvfk9n){background:none;padding:0}*{margin:0;padding:0;box-sizing:border-box}body{font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,sans-serif;background:var(--background-fill-primary, #fff);color:var(--body-text-color, #1f2937);font-size:var(--text-md, 14px);-webkit-font-smoothing:antialiased}.app.svelte-1n46o8q{display:flex;height:100vh;overflow:hidden}.main.svelte-1n46o8q{flex:1;display:flex;flex-direction:column;overflow:hidden;min-width:0}.page-content.svelte-1n46o8q{flex:1;overflow:hidden;display:flex;background:var(--bg-primary)}
trackio/frontend/dist/assets/index-D1G_q77u.js ADDED
The diff for this file is too large to render. See raw diff
 
trackio/frontend/dist/index.html ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <title>Trackio Dashboard</title>
7
+ <link rel="icon" type="image/png" href="/static/trackio/trackio_logo_light.png" />
8
+ <script type="module" crossorigin src="/assets/index-D1G_q77u.js"></script>
9
+ <link rel="stylesheet" crossorigin href="/assets/index-0Wf8YNCR.css">
10
+ </head>
11
+ <body>
12
+ <div id="app"></div>
13
+ </body>
14
+ </html>
trackio/frontend/eslint.config.js ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// ESLint flat config for the Trackio Svelte frontend.
import js from "@eslint/js";
import svelte from "eslint-plugin-svelte";
import svelteParser from "svelte-eslint-parser";
import globals from "globals";

export default [
  // Never lint build output or installed dependencies.
  { ignores: ["dist/**", "node_modules/**"] },
  {
    // Plain JavaScript modules.
    files: ["**/*.js"],
    languageOptions: {
      globals: {
        ...globals.browser,
        ...globals.es2021,
        // Svelte 5 "rune" globals are injected by the compiler, so declare
        // them read-only here to avoid no-undef errors in .js modules.
        $state: "readonly",
        $derived: "readonly",
        $effect: "readonly",
        $props: "readonly",
        $bindable: "readonly",
        $inspect: "readonly",
      },
    },
    rules: {
      ...js.configs.recommended.rules,
      // Arguments intentionally left unused may be prefixed with "_".
      "no-unused-vars": ["error", { argsIgnorePattern: "^_" }],
      "no-empty": "off",
    },
  },
  {
    // Svelte components need the dedicated parser and plugin rule set.
    files: ["**/*.svelte"],
    languageOptions: {
      parser: svelteParser,
      globals: { ...globals.browser, ...globals.es2021 },
    },
    plugins: { svelte },
    rules: {
      ...js.configs.recommended.rules,
      ...svelte.configs.recommended.rules,
      // Also ignore "$"-prefixed vars (Svelte store auto-subscriptions).
      "no-unused-vars": ["error", { argsIgnorePattern: "^_", varsIgnorePattern: "^\\$" }],
      "no-empty": "off",
    },
  },
];
trackio/frontend/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6
+ <title>Trackio Dashboard</title>
7
+ <link rel="icon" type="image/png" href="/static/trackio/trackio_logo_light.png" />
8
+ </head>
9
+ <body>
10
+ <div id="app"></div>
11
+ <script type="module" src="/src/main.js"></script>
12
+ </body>
13
+ </html>
trackio/frontend_server.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Serves the built Svelte frontend alongside the Gradio API."""
2
+
3
+ import logging
4
+ import re
5
+ from pathlib import Path
6
+
7
+ from starlette.responses import HTMLResponse
8
+ from starlette.routing import Mount, Route
9
+ from starlette.staticfiles import StaticFiles
10
+
11
+ FRONTEND_DIR = Path(__file__).parent / "frontend" / "dist"
12
+ ASSETS_DIR = Path(__file__).parent / "assets"
13
+
14
+ _logger = logging.getLogger(__name__)
15
+
16
+ _SPA_SEGMENTS = (
17
+ "metrics",
18
+ "system",
19
+ "media",
20
+ "reports",
21
+ "runs",
22
+ "run",
23
+ "files",
24
+ )
25
+
26
+
27
def mount_frontend(app):
    """Mount the built Svelte dashboard onto a Starlette/FastAPI ``app``.

    Registers, in route-priority order (inserted at index 0, so later inserts
    win): trackio logo/static assets under ``/static/trackio``, the Vite-built
    JS/CSS bundle under ``/assets/app``, and an SPA catch-all HTML response for
    ``/`` and every segment in ``_SPA_SEGMENTS`` (with and without a trailing
    slash).

    Args:
        app: A Starlette-compatible application whose ``routes`` list the
            frontend routes are inserted into.

    No-ops with a warning when the frontend bundle has not been built.
    """
    if not FRONTEND_DIR.exists():
        _logger.warning(
            "Trackio dashboard UI was not mounted: %s is missing. "
            "Build the frontend with `npm ci && npm run build` in trackio/frontend.",
            FRONTEND_DIR,
        )
        return

    index_html_path = FRONTEND_DIR / "index.html"
    if not index_html_path.exists():
        _logger.warning(
            "Trackio dashboard UI was not mounted: %s is missing.",
            index_html_path,
        )
        return

    # The bundle is UTF-8 regardless of platform locale, so decode explicitly
    # instead of relying on the locale-dependent default encoding.
    index_html_content = index_html_path.read_text(encoding="utf-8")
    # Rewrite Vite's hashed /assets/index-* references to the /assets/app
    # mount point so they don't collide with anything the host app serves
    # under /assets.
    patched_html = re.sub(
        r'/assets/(index-[^"]+)',
        r"/assets/app/\1",
        index_html_content,
    )

    async def serve_frontend(request):
        # Single-page app: every SPA route serves the same patched index.html.
        return HTMLResponse(patched_html)

    vite_assets = StaticFiles(directory=str(FRONTEND_DIR / "assets"))
    static_assets = StaticFiles(directory=str(ASSETS_DIR))

    app.routes.insert(0, Mount("/static/trackio", app=static_assets))
    app.routes.insert(0, Mount("/assets/app", app=vite_assets))

    # Reversed so that after repeated insert(0, ...) the final route order
    # matches _SPA_SEGMENTS order, with "/" taking highest priority.
    for seg in reversed(_SPA_SEGMENTS):
        app.routes.insert(0, Route(f"/{seg}/", serve_frontend, methods=["GET"]))
        app.routes.insert(0, Route(f"/{seg}", serve_frontend, methods=["GET"]))
    app.routes.insert(0, Route("/", serve_frontend, methods=["GET"]))
trackio/gpu.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import threading
3
+ import warnings
4
+ from typing import TYPE_CHECKING, Any
5
+
6
+ if TYPE_CHECKING:
7
+ from trackio.run import Run
8
+
9
+ pynvml: Any = None
10
+ PYNVML_AVAILABLE = False
11
+ _nvml_initialized = False
12
+ _nvml_lock = threading.Lock()
13
+ _energy_baseline: dict[int, float] = {}
14
+
15
+
16
def _ensure_pynvml():
    """Import and cache the ``pynvml`` module, raising if it is unavailable.

    Returns:
        The imported ``pynvml`` module (cached in the module global after the
        first successful import).

    Raises:
        ImportError: If ``nvidia-ml-py`` is not installed. The original
            ImportError is chained (``from e``) so the root cause is visible.
    """
    global PYNVML_AVAILABLE, pynvml
    if PYNVML_AVAILABLE:
        return pynvml
    try:
        import pynvml as _pynvml
    except ImportError as e:
        # Chain the original error so tracebacks show the real import failure.
        raise ImportError(
            "nvidia-ml-py is required for GPU monitoring. "
            "Install it with: pip install nvidia-ml-py"
        ) from e
    pynvml = _pynvml
    PYNVML_AVAILABLE = True
    return pynvml
31
+
32
+
33
def _init_nvml() -> bool:
    """Initialize NVML exactly once (thread-safe); return True on success.

    Any failure — pynvml missing or nvmlInit() erroring — is reported as
    False rather than raised, so callers can degrade gracefully.
    """
    global _nvml_initialized
    with _nvml_lock:
        if not _nvml_initialized:
            try:
                _ensure_pynvml().nvmlInit()
            except Exception:
                return False
            _nvml_initialized = True
        return True
45
+
46
+
47
def get_gpu_count() -> tuple[int, list[int]]:
    """
    Get the number of GPUs visible to this process and their physical indices.
    Respects the CUDA_VISIBLE_DEVICES environment variable.

    Returns:
        Tuple of (count, physical_indices) where:
        - count: Number of visible GPUs
        - physical_indices: List mapping logical index to physical GPU index.
          e.g., if CUDA_VISIBLE_DEVICES=2,3 returns (2, [2, 3])
          meaning logical GPU 0 = physical GPU 2, logical GPU 1 = physical GPU 3
    """
    if not _init_nvml():
        return 0, []

    mask = os.environ.get("CUDA_VISIBLE_DEVICES")
    if mask is not None and mask.strip():
        try:
            mapping = [int(part.strip()) for part in mask.split(",") if part.strip()]
            return len(mapping), mapping
        except ValueError:
            # NOTE(review): non-integer entries (e.g. GPU UUIDs) fall through
            # to "all GPUs" below — confirm that is intended for UUID masks.
            pass

    try:
        total = pynvml.nvmlDeviceGetCount()
    except Exception:
        return 0, []
    return total, list(range(total))
75
+
76
+
77
def gpu_available() -> bool:
    """
    Check if GPU monitoring is available.

    Returns True only when nvidia-ml-py is importable AND at least one NVIDIA
    GPU is detected; every failure mode maps to False. Used for auto-detection
    of GPU logging.
    """
    try:
        _ensure_pynvml()
        detected, _ = get_gpu_count()
    except Exception:
        # Covers both ImportError (pynvml missing) and any NVML failure.
        return False
    return detected > 0
92
+
93
+
94
def reset_energy_baseline():
    """Discard all per-GPU energy baselines.

    Called when a new run starts so the next metric collection re-anchors the
    cumulative energy counters at zero for that run.
    """
    global _energy_baseline
    _energy_baseline = {}
98
+
99
+
100
def collect_gpu_metrics(device: int | None = None) -> dict:
    """
    Collect GPU metrics for visible GPUs.

    Args:
        device: CUDA device index to collect metrics from. If None, collects
            from all GPUs visible to this process (respects CUDA_VISIBLE_DEVICES).
            The device index is the logical CUDA index (0, 1, 2...), not the
            physical GPU index.

    Returns:
        Dictionary of GPU metrics. Keys use logical device indices (gpu/0/, gpu/1/, etc.)
        which correspond to CUDA device indices, not physical GPU indices.
        Each NVML query is best-effort: metrics a device does not support are
        simply omitted. Aggregates (gpu/mean_utilization, gpu/total_power,
        gpu/total_allocated_memory, gpu/max_temp) are added when available.
    """
    if not _init_nvml():
        return {}

    gpu_count, visible_gpus = get_gpu_count()
    if gpu_count == 0:
        return {}

    if device is not None:
        # Out-of-range logical index: nothing to collect.
        if device < 0 or device >= gpu_count:
            return {}
        gpu_indices = [(device, visible_gpus[device])]
    else:
        gpu_indices = list(enumerate(visible_gpus))

    metrics = {}
    # Accumulators for the cross-GPU aggregate metrics emitted at the end.
    total_util = 0.0
    total_mem_used_gib = 0.0
    total_power = 0.0
    max_temp = 0.0
    valid_util_count = 0

    for logical_idx, physical_idx in gpu_indices:
        # Metric keys are namespaced by the *logical* (CUDA) index.
        prefix = f"gpu/{logical_idx}"
        try:
            # NVML handles are looked up by physical index.
            handle = pynvml.nvmlDeviceGetHandleByIndex(physical_idx)

            # GPU and memory-controller utilization (percent).
            try:
                util = pynvml.nvmlDeviceGetUtilizationRates(handle)
                metrics[f"{prefix}/utilization"] = util.gpu
                metrics[f"{prefix}/memory_utilization"] = util.memory
                total_util += util.gpu
                valid_util_count += 1
            except Exception:
                pass

            # Memory usage, converted from bytes to GiB; memory_usage is a
            # 0..1 fraction of total.
            try:
                mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
                mem_used_gib = mem.used / (1024**3)
                mem_total_gib = mem.total / (1024**3)
                metrics[f"{prefix}/allocated_memory"] = mem_used_gib
                metrics[f"{prefix}/total_memory"] = mem_total_gib
                if mem.total > 0:
                    metrics[f"{prefix}/memory_usage"] = mem.used / mem.total
                total_mem_used_gib += mem_used_gib
            except Exception:
                pass

            # Power draw: NVML reports milliwatts; stored as watts.
            try:
                power_mw = pynvml.nvmlDeviceGetPowerUsage(handle)
                power_w = power_mw / 1000.0
                metrics[f"{prefix}/power"] = power_w
                total_power += power_w
            except Exception:
                pass

            # Power limit (watts) and draw as a percentage of the limit
            # (note: power_percent is 0..100, unlike memory_usage's 0..1).
            try:
                power_limit_mw = pynvml.nvmlDeviceGetPowerManagementLimit(handle)
                power_limit_w = power_limit_mw / 1000.0
                metrics[f"{prefix}/power_limit"] = power_limit_w
                if power_limit_w > 0 and f"{prefix}/power" in metrics:
                    metrics[f"{prefix}/power_percent"] = (
                        metrics[f"{prefix}/power"] / power_limit_w
                    ) * 100
            except Exception:
                pass

            # Core temperature (degrees Celsius).
            try:
                temp = pynvml.nvmlDeviceGetTemperature(
                    handle, pynvml.NVML_TEMPERATURE_GPU
                )
                metrics[f"{prefix}/temp"] = temp
                max_temp = max(max_temp, temp)
            except Exception:
                pass

            # SM and memory clock speeds (MHz per NVML docs — confirm).
            try:
                sm_clock = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_SM)
                metrics[f"{prefix}/sm_clock"] = sm_clock
            except Exception:
                pass

            try:
                mem_clock = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_MEM)
                metrics[f"{prefix}/memory_clock"] = mem_clock
            except Exception:
                pass

            # Fan speed (percent); unsupported on passively cooled cards.
            try:
                fan_speed = pynvml.nvmlDeviceGetFanSpeed(handle)
                metrics[f"{prefix}/fan_speed"] = fan_speed
            except Exception:
                pass

            # Performance state (P0 = max performance ... P12 = idle).
            try:
                pstate = pynvml.nvmlDeviceGetPerformanceState(handle)
                metrics[f"{prefix}/performance_state"] = pstate
            except Exception:
                pass

            # Cumulative energy: NVML reports millijoules since driver load.
            # The first reading per logical index becomes the baseline, so the
            # logged value is joules consumed since this run started.
            try:
                energy_mj = pynvml.nvmlDeviceGetTotalEnergyConsumption(handle)
                if logical_idx not in _energy_baseline:
                    _energy_baseline[logical_idx] = energy_mj
                energy_consumed_mj = energy_mj - _energy_baseline[logical_idx]
                metrics[f"{prefix}/energy_consumed"] = energy_consumed_mj / 1000.0
            except Exception:
                pass

            # PCIe throughput; NVML reports KiB/s, converted to MiB/s here
            # (presumably — confirm against the NVML documentation).
            try:
                pcie_tx = pynvml.nvmlDeviceGetPcieThroughput(
                    handle, pynvml.NVML_PCIE_UTIL_TX_BYTES
                )
                pcie_rx = pynvml.nvmlDeviceGetPcieThroughput(
                    handle, pynvml.NVML_PCIE_UTIL_RX_BYTES
                )
                metrics[f"{prefix}/pcie_tx"] = pcie_tx / 1024.0
                metrics[f"{prefix}/pcie_rx"] = pcie_rx / 1024.0
            except Exception:
                pass

            # Clock-throttle reasons, decoded from the NVML bitmask into
            # individual 0/1 flags.
            try:
                throttle = pynvml.nvmlDeviceGetCurrentClocksThrottleReasons(handle)
                metrics[f"{prefix}/throttle_thermal"] = int(
                    bool(throttle & pynvml.nvmlClocksThrottleReasonSwThermalSlowdown)
                )
                metrics[f"{prefix}/throttle_power"] = int(
                    bool(throttle & pynvml.nvmlClocksThrottleReasonSwPowerCap)
                )
                metrics[f"{prefix}/throttle_hw_slowdown"] = int(
                    bool(throttle & pynvml.nvmlClocksThrottleReasonHwSlowdown)
                )
                metrics[f"{prefix}/throttle_apps"] = int(
                    bool(
                        throttle
                        & pynvml.nvmlClocksThrottleReasonApplicationsClocksSetting
                    )
                )
            except Exception:
                pass

            # Volatile (since-reboot) ECC error counters; unsupported on
            # consumer GPUs, in which case the keys are omitted.
            try:
                ecc_corrected = pynvml.nvmlDeviceGetTotalEccErrors(
                    handle,
                    pynvml.NVML_MEMORY_ERROR_TYPE_CORRECTED,
                    pynvml.NVML_VOLATILE_ECC,
                )
                metrics[f"{prefix}/corrected_memory_errors"] = ecc_corrected
            except Exception:
                pass

            try:
                ecc_uncorrected = pynvml.nvmlDeviceGetTotalEccErrors(
                    handle,
                    pynvml.NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
                    pynvml.NVML_VOLATILE_ECC,
                )
                metrics[f"{prefix}/uncorrected_memory_errors"] = ecc_uncorrected
            except Exception:
                pass

        except Exception:
            # Could not even get a handle for this GPU; skip it entirely.
            continue

    # Cross-GPU aggregates, emitted only when there is data to aggregate.
    if valid_util_count > 0:
        metrics["gpu/mean_utilization"] = total_util / valid_util_count
    if total_mem_used_gib > 0:
        metrics["gpu/total_allocated_memory"] = total_mem_used_gib
    if total_power > 0:
        metrics["gpu/total_power"] = total_power
    if max_temp > 0:
        metrics["gpu/max_temp"] = max_temp

    return metrics
287
+
288
+
289
class GpuMonitor:
    """Background daemon thread that periodically logs GPU metrics to a Run."""

    def __init__(self, run: "Run", interval: float = 10.0):
        self._run = run
        self._interval = interval
        self._stop_flag = threading.Event()
        self._thread: "threading.Thread | None" = None

    def start(self):
        """Begin sampling; warns and no-ops when no NVIDIA GPU is detected."""
        detected, _ = get_gpu_count()
        if not detected:
            warnings.warn(
                "auto_log_gpu=True but no NVIDIA GPUs detected. GPU logging disabled."
            )
            return

        # Anchor energy counters at zero for this run before sampling begins.
        reset_energy_baseline()
        worker = threading.Thread(target=self._monitor_loop, daemon=True)
        self._thread = worker
        worker.start()

    def stop(self):
        """Signal the loop to exit and wait briefly for the thread to finish."""
        self._stop_flag.set()
        worker = self._thread
        if worker is not None:
            worker.join(timeout=2.0)

    def _monitor_loop(self):
        # Sample until stop() is requested. Collection errors are swallowed so
        # a flaky NVML call can never take down the training process.
        while not self._stop_flag.is_set():
            try:
                sample = collect_gpu_metrics()
                if sample:
                    self._run.log_system(sample)
            except Exception:
                pass

            # Event.wait doubles as a sleep that wakes early on stop().
            self._stop_flag.wait(timeout=self._interval)
323
+
324
+
325
def log_gpu(run: "Run | None" = None, device: int | None = None) -> dict:
    """
    Log GPU metrics to the current or specified run as system metrics.

    Args:
        run: Optional Run instance. If None, the current run is taken from the
            trackio context.
        device: CUDA device index to collect metrics from. If None, collects
            from all GPUs visible to this process (respects CUDA_VISIBLE_DEVICES).

    Returns:
        dict: The GPU metrics that were logged (empty if nothing was collected).

    Raises:
        RuntimeError: If no run is given and no run is active in the context.

    Example:
        ```python
        import trackio

        run = trackio.init(project="my-project")
        trackio.log({"loss": 0.5})
        trackio.log_gpu()  # logs all visible GPUs
        trackio.log_gpu(device=0)  # logs only CUDA device 0
        ```
    """
    # Imported lazily to avoid a circular import at module load time.
    from trackio import context_vars

    active = run if run is not None else context_vars.current_run.get()
    if active is None:
        raise RuntimeError("Call trackio.init() before trackio.log_gpu().")

    metrics = collect_gpu_metrics(device=device)
    if metrics:
        active.log_system(metrics)
    return metrics
trackio/histogram.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Sequence
2
+
3
+ import numpy as np
4
+
5
+
6
+ class Histogram:
7
+ """
8
+ Histogram data type for Trackio, compatible with wandb.Histogram.
9
+
10
+ Args:
11
+ sequence (`np.ndarray` or `Sequence[float]` or `Sequence[int]`, *optional*):
12
+ Sequence of values to create the histogram from.
13
+ np_histogram (`tuple`, *optional*):
14
+ Pre-computed NumPy histogram as a `(hist, bins)` tuple.
15
+ num_bins (`int`, *optional*, defaults to `64`):
16
+ Number of bins for the histogram (maximum `512`).
17
+
18
+ Example:
19
+ ```python
20
+ import trackio
21
+ import numpy as np
22
+
23
+ # Create histogram from sequence
24
+ data = np.random.randn(1000)
25
+ trackio.log({"distribution": trackio.Histogram(data)})
26
+
27
+ # Create histogram from numpy histogram
28
+ hist, bins = np.histogram(data, bins=30)
29
+ trackio.log({"distribution": trackio.Histogram(np_histogram=(hist, bins))})
30
+
31
+ # Specify custom number of bins
32
+ trackio.log({"distribution": trackio.Histogram(data, num_bins=50)})
33
+ ```
34
+ """
35
+
36
+ TYPE = "trackio.histogram"
37
+
38
+ def __init__(
39
+ self,
40
+ sequence: np.ndarray | Sequence[float] | Sequence[int] | None = None,
41
+ np_histogram: tuple | None = None,
42
+ num_bins: int = 64,
43
+ ):
44
+ if sequence is None and np_histogram is None:
45
+ raise ValueError("Must provide either sequence or np_histogram")
46
+
47
+ if sequence is not None and np_histogram is not None:
48
+ raise ValueError("Cannot provide both sequence and np_histogram")
49
+
50
+ num_bins = min(num_bins, 512)
51
+
52
+ if np_histogram is not None:
53
+ self.histogram, self.bins = np_histogram
54
+ self.histogram = np.asarray(self.histogram)
55
+ self.bins = np.asarray(self.bins)
56
+ else:
57
+ data = np.asarray(sequence).flatten()
58
+ data = data[np.isfinite(data)]
59
+ if len(data) == 0:
60
+ self.histogram = np.array([])
61
+ self.bins = np.array([])
62
+ else:
63
+ self.histogram, self.bins = np.histogram(data, bins=num_bins)
64
+
65
+ def _to_dict(self) -> dict:
66
+ """Convert histogram to dictionary for storage."""
67
+ return {
68
+ "_type": self.TYPE,
69
+ "bins": self.bins.tolist(),
70
+ "values": self.histogram.tolist(),
71
+ }
trackio/imports.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+
4
+ import pandas as pd
5
+
6
+ from trackio import deploy, utils
7
+ from trackio.sqlite_storage import SQLiteStorage
8
+
9
+
10
def import_csv(
    csv_path: str | Path,
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
    force: bool = False,
) -> None:
    """
    Imports a CSV file into a Trackio project. The CSV file must contain a `"step"`
    column, may optionally contain a `"timestamp"` column, and any other columns will be
    treated as metrics. It should also include a header row with the column names.

    TODO: call init() and return a Run object so that the user can continue to log metrics to it.

    Args:
        csv_path (`str` or `Path`):
            The str or Path to the CSV file to import.
        project (`str`):
            The name of the project to import the CSV file into. Must not be an existing
            project.
        name (`str`, *optional*):
            The name of the Run to import the CSV file into. If not provided, a default
            name will be generated from the CSV filename.
        space_id (`str`, *optional*):
            If provided, the project will be logged to a Hugging Face Space instead of a
            local directory. Should be a complete Space name like `"username/reponame"`
            or `"orgname/reponame"`, or just `"reponame"` in which case the Space will
            be created in the currently-logged-in Hugging Face user's namespace. If the
            Space does not exist, it will be created. If the Space already exists, the
            project will be logged to it.
        dataset_id (`str`, *optional*):
            If provided, a persistent Hugging Face Dataset will be created and the
            metrics will be synced to it every 5 minutes. Should be a complete Dataset
            name like `"username/datasetname"` or `"orgname/datasetname"`, or just
            `"datasetname"` in which case the Dataset will be created in the
            currently-logged-in Hugging Face user's namespace. If the Dataset does not
            exist, it will be created. If the Dataset already exists, the project will
            be appended to it. If not provided, the metrics will be logged to a local
            SQLite database, unless a `space_id` is provided, in which case a Dataset
            will be automatically created with the same name as the Space but with the
            `"_dataset"` suffix.
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
        force (`bool`, *optional*, defaults to `False`):
            Forwarded to `deploy.upload_db_to_space` when a `space_id` is provided.
    """
    if SQLiteStorage.get_runs(project):
        raise ValueError(
            f"Project '{project}' already exists. Cannot import CSV into existing project."
        )

    csv_path = Path(csv_path)
    if not csv_path.exists():
        raise FileNotFoundError(f"CSV file not found: {csv_path}")

    df = pd.read_csv(csv_path)
    if df.empty:
        raise ValueError("CSV file is empty")

    column_mapping = utils.simplify_column_names(df.columns.tolist())
    df = df.rename(columns=column_mapping)

    # Locate the step column case-insensitively.
    step_column = next((col for col in df.columns if col.lower() == "step"), None)
    if step_column is None:
        raise ValueError("CSV file must contain a 'step' or 'Step' column")

    if name is None:
        name = csv_path.stem

    # Keep only columns whose every value parses as a number; everything else
    # (besides step/timestamp) is silently skipped.
    numeric_columns = []
    for column in df.columns:
        if column == step_column or column == "timestamp":
            continue
        try:
            pd.to_numeric(df[column], errors="raise")
            numeric_columns.append(column)
        except (ValueError, TypeError):
            continue

    metrics_list = []
    steps = []
    timestamps = []
    for _, row in df.iterrows():
        metrics = {
            column: float(row[column])
            for column in numeric_columns
            if bool(pd.notna(row[column]))
        }
        if not metrics:
            continue
        metrics_list.append(metrics)
        steps.append(int(row[step_column]))
        if "timestamp" in df.columns and bool(pd.notna(row["timestamp"])):
            timestamps.append(str(row["timestamp"]))
        else:
            timestamps.append("")

    if metrics_list:
        SQLiteStorage.bulk_log(
            project=project,
            run=name,
            metrics_list=metrics_list,
            steps=steps,
            timestamps=timestamps,
        )
        # Summary prints are guarded: `metrics_list[0]` would raise IndexError
        # when no numeric metrics were found in the CSV.
        print(
            f"* Imported {len(metrics_list)} rows from {csv_path} into project '{project}' as run '{name}'"
        )
        print(f"* Metrics found: {', '.join(metrics_list[0].keys())}")

    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    if dataset_id is not None:
        os.environ["TRACKIO_DATASET_ID"] = dataset_id
        print(f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}")

    if space_id is None:
        utils.print_dashboard_instructions(project)
    else:
        deploy.create_space_if_not_exists(
            space_id=space_id, dataset_id=dataset_id, private=private
        )
        deploy.wait_until_space_exists(space_id=space_id)
        deploy.upload_db_to_space(project=project, space_id=space_id, force=force)
        print(
            f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
        )
151
+
152
+
153
def import_tf_events(
    log_dir: str | Path,
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
    private: bool | None = None,
    force: bool = False,
) -> None:
    """
    Imports TensorFlow Events files from a directory into a Trackio project. Each
    subdirectory in the log directory will be imported as a separate run.

    Args:
        log_dir (`str` or `Path`):
            The str or Path to the directory containing TensorFlow Events files.
        project (`str`):
            The name of the project to import the TensorFlow Events files into. Must not
            be an existing project.
        name (`str`, *optional*):
            The name prefix for runs (if not provided, will use directory names). Each
            subdirectory will create a separate run.
        space_id (`str`, *optional*):
            If provided, the project will be logged to a Hugging Face Space instead of a
            local directory. Should be a complete Space name like `"username/reponame"`
            or `"orgname/reponame"`, or just `"reponame"` in which case the Space will
            be created in the currently-logged-in Hugging Face user's namespace. If the
            Space does not exist, it will be created. If the Space already exists, the
            project will be logged to it.
        dataset_id (`str`, *optional*):
            If provided, a persistent Hugging Face Dataset will be created and the
            metrics will be synced to it every 5 minutes. Should be a complete Dataset
            name like `"username/datasetname"` or `"orgname/datasetname"`, or just
            `"datasetname"` in which case the Dataset will be created in the
            currently-logged-in Hugging Face user's namespace. If the Dataset does not
            exist, it will be created. If the Dataset already exists, the project will
            be appended to it. If not provided, the metrics will be logged to a local
            SQLite database, unless a `space_id` is provided, in which case a Dataset
            will be automatically created with the same name as the Space but with the
            `"_dataset"` suffix.
        private (`bool`, *optional*):
            Whether to make the Space private. If None (default), the repo will be
            public unless the organization's default is private. This value is ignored
            if the repo already exists.
        force (`bool`, *optional*, defaults to `False`):
            Forwarded to `deploy.upload_db_to_space` when a `space_id` is provided.
    """
    try:
        from tbparse import SummaryReader
    except ImportError:
        raise ImportError(
            "The `tbparse` package is not installed but is required for `import_tf_events`. Please install trackio with the `tensorboard` extra: `pip install trackio[tensorboard]`."
        )

    if SQLiteStorage.get_runs(project):
        raise ValueError(
            f"Project '{project}' already exists. Cannot import TF events into existing project."
        )

    path = Path(log_dir)
    if not path.exists():
        raise FileNotFoundError(f"TF events directory not found: {path}")

    # tbparse reads every tfevents file in the directory tree in one pass;
    # dir_name lets us split the scalars back out per subdirectory.
    reader = SummaryReader(str(path), extra_columns={"dir_name"})
    df = reader.scalars

    if df.empty:
        raise ValueError(f"No TensorFlow events data found in {path}")

    total_imported = 0
    imported_runs = []

    # Each subdirectory becomes its own run.
    for dir_name, group_df in df.groupby("dir_name"):
        try:
            # Files in the root directory map to the "main" run.
            run_name = "main" if dir_name == "" else dir_name
            if name:
                run_name = f"{name}_{run_name}"

            if group_df.empty:
                print(f"* Skipping directory {dir_name}: no scalar data found")
                continue

            metrics_list = []
            steps = []
            timestamps = []

            for _, row in group_df.iterrows():
                # One scalar event per row: {tag: value} at a given step.
                metrics_list.append({str(row["tag"]): float(row["value"])})
                steps.append(int(row["step"]))

                # Use wall_time if present, else fall back to an empty timestamp.
                if "wall_time" in group_df.columns and not bool(
                    pd.isna(row["wall_time"])
                ):
                    timestamps.append(str(row["wall_time"]))
                else:
                    timestamps.append("")

            if metrics_list:
                SQLiteStorage.bulk_log(
                    project=project,
                    run=str(run_name),
                    metrics_list=metrics_list,
                    steps=steps,
                    timestamps=timestamps,
                )

                total_imported += len(metrics_list)
                imported_runs.append(run_name)

                print(
                    f"* Imported {len(metrics_list)} scalar events from directory '{dir_name}' as run '{run_name}'"
                )
                print(f"* Metrics in this run: {', '.join(set(group_df['tag']))}")

        except Exception as e:
            # Best-effort import: a bad directory should not abort the others.
            print(f"* Error processing directory {dir_name}: {e}")
            continue

    if not imported_runs:
        raise ValueError("No valid TensorFlow events data could be imported")

    print(f"* Total imported events: {total_imported}")
    print(f"* Created runs: {', '.join(imported_runs)}")

    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    if dataset_id is not None:
        os.environ["TRACKIO_DATASET_ID"] = dataset_id
        print(f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}")

    if space_id is None:
        utils.print_dashboard_instructions(project)
    else:
        # Keyword arguments for consistency with `import_csv`.
        deploy.create_space_if_not_exists(
            space_id=space_id, dataset_id=dataset_id, private=private
        )
        deploy.wait_until_space_exists(space_id=space_id)
        deploy.upload_db_to_space(project=project, space_id=space_id, force=force)
        print(
            f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
        )
trackio/markdown.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class Markdown:
    """
    Markdown report data type for Trackio.

    Args:
        text (`str`):
            Markdown content to log.
    """

    TYPE = "trackio.markdown"

    def __init__(self, text: str = ""):
        # Reject non-string payloads early so logging fails fast.
        if not isinstance(text, str):
            raise ValueError("Markdown text must be a string")
        self.text = text

    def _to_dict(self) -> dict:
        """Serialize to the storage payload format."""
        payload = {"_type": self.TYPE}
        payload["_value"] = self.text
        return payload
trackio/media/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Media module for Trackio.
3
+
4
+ This module contains all media-related functionality including:
5
+ - TrackioImage, TrackioVideo, TrackioAudio classes
6
+ - Video writing utilities
7
+ - Audio conversion utilities
8
+ """
9
+
10
+ from trackio.media.audio import TrackioAudio
11
+ from trackio.media.image import TrackioImage
12
+ from trackio.media.media import TrackioMedia
13
+ from trackio.media.utils import get_project_media_path
14
+ from trackio.media.video import TrackioVideo
15
+
16
+ write_audio = TrackioAudio.write_audio
17
+ write_video = TrackioVideo.write_video
18
+
19
+ __all__ = [
20
+ "TrackioMedia",
21
+ "TrackioImage",
22
+ "TrackioVideo",
23
+ "TrackioAudio",
24
+ "get_project_media_path",
25
+ "write_video",
26
+ "write_audio",
27
+ ]
trackio/media/audio.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import warnings
4
+ from pathlib import Path
5
+ from typing import Literal
6
+
7
+ import numpy as np
8
+ from pydub import AudioSegment
9
+
10
+ from trackio.media.media import TrackioMedia
11
+ from trackio.media.utils import check_ffmpeg_installed, check_path
12
+
13
+ SUPPORTED_FORMATS = ["wav", "mp3"]
14
+ AudioFormatType = Literal["wav", "mp3"]
15
+ TrackioAudioSourceType = str | Path | np.ndarray
16
+
17
+
18
class TrackioAudio(TrackioMedia):
    """
    Initializes an Audio object.

    Example:
        ```python
        import trackio
        import numpy as np

        # Generate a 1-second 440 Hz sine wave (mono)
        sr = 16000
        t = np.linspace(0, 1, sr, endpoint=False)
        wave = 0.2 * np.sin(2 * np.pi * 440 * t)
        audio = trackio.Audio(wave, caption="A4 sine", sample_rate=sr, format="wav")
        trackio.log({"tone": audio})

        # Stereo from numpy array (shape: samples, 2)
        stereo = np.stack([wave, wave], axis=1)
        audio = trackio.Audio(stereo, caption="Stereo", sample_rate=sr, format="mp3")
        trackio.log({"stereo": audio})

        # From an existing file
        audio = trackio.Audio("path/to/audio.wav", caption="From file")
        trackio.log({"file_audio": audio})
        ```

    Args:
        value (`str`, `Path`, or `numpy.ndarray`, *optional*):
            A path to an audio file, or a numpy array shaped `(samples,)` for
            mono or `(samples, 2)` for stereo. Float arrays are peak-normalized
            and converted to 16-bit PCM; integer arrays are converted to 16-bit
            PCM as needed.
        caption (`str`, *optional*):
            A string caption for the audio.
        sample_rate (`int`, *optional*):
            Sample rate in Hz. Required when `value` is a numpy array.
        format (`Literal["wav", "mp3"]`, *optional*):
            Audio format used when `value` is a numpy array. Default is "wav".
    """

    TYPE = "trackio.audio"

    def __init__(
        self,
        value: TrackioAudioSourceType,
        caption: str | None = None,
        sample_rate: int | None = None,
        format: AudioFormatType | None = None,
    ):
        super().__init__(value, caption)
        if isinstance(value, np.ndarray):
            if sample_rate is None:
                raise ValueError("Sample rate is required when value is an ndarray")
            if format is None:
                format = "wav"
        self._format = format
        self._sample_rate = sample_rate

    def _save_media(self, file_path: Path):
        """Write the audio to `file_path`: encode arrays, copy file sources."""
        source = self._value
        if isinstance(source, np.ndarray):
            TrackioAudio.write_audio(
                data=source,
                sample_rate=self._sample_rate,
                filename=file_path,
                format=self._format,
            )
        elif isinstance(source, str | Path):
            if not os.path.isfile(source):
                raise ValueError(f"File not found: {source}")
            shutil.copy(source, file_path)

    @staticmethod
    def ensure_int16_pcm(data: np.ndarray) -> np.ndarray:
        """
        Convert input audio array to contiguous int16 PCM.
        Peak normalization is applied to floating inputs.
        """
        arr = np.asarray(data)
        if arr.ndim not in (1, 2):
            raise ValueError("Audio data must be 1D (mono) or 2D ([samples, channels])")

        if arr.dtype != np.int16:
            warnings.warn(
                f"Converting {arr.dtype} audio to int16 PCM; pass int16 to avoid conversion.",
                stacklevel=2,
            )

        # Replace NaN/inf so they cannot poison normalization or casting.
        arr = np.nan_to_num(arr, copy=False)

        if np.issubdtype(arr.dtype, np.floating):
            # Floating input: normalize to peak 1.0, then scale to int16 range.
            peak = float(np.max(np.abs(arr))) if arr.size else 0.0
            if peak > 0.0:
                arr = arr / peak
            scaled = (arr * 32767.0).clip(-32768, 32767)
            return np.ascontiguousarray(scaled.astype(np.int16, copy=False))

        # Integer inputs: rescale each supported width into int16.
        converters = {
            np.dtype(np.int16): lambda a: a,
            np.dtype(np.int32): lambda a: (a.astype(np.int32) // 65536).astype(
                np.int16, copy=False
            ),
            np.dtype(np.uint16): lambda a: (a.astype(np.int32) - 32768).astype(
                np.int16, copy=False
            ),
            np.dtype(np.uint8): lambda a: (a.astype(np.int32) * 257 - 32768).astype(
                np.int16, copy=False
            ),
            np.dtype(np.int8): lambda a: (a.astype(np.int32) * 256).astype(
                np.int16, copy=False
            ),
        }
        convert = converters.get(arr.dtype)
        if convert is None:
            raise TypeError(f"Unsupported audio dtype: {arr.dtype}")
        return np.ascontiguousarray(convert(arr))

    @staticmethod
    def write_audio(
        data: np.ndarray,
        sample_rate: int,
        filename: str | Path,
        format: AudioFormatType = "wav",
    ) -> None:
        """Encode `data` to `filename` via pydub; mp3 additionally needs ffmpeg."""
        if not isinstance(sample_rate, int) or sample_rate <= 0:
            raise ValueError(f"Invalid sample_rate: {sample_rate}")
        if format not in SUPPORTED_FORMATS:
            raise ValueError(
                f"Unsupported format: {format}. Supported: {SUPPORTED_FORMATS}"
            )

        check_path(filename)

        pcm = TrackioAudio.ensure_int16_pcm(data)

        # Only non-wav encoding goes through ffmpeg.
        if format != "wav":
            check_ffmpeg_installed()

        channel_count = 1 if pcm.ndim == 1 else pcm.shape[1]
        segment = AudioSegment(
            pcm.tobytes(),
            frame_rate=sample_rate,
            sample_width=2,  # int16
            channels=channel_count,
        )

        handle = segment.export(str(filename), format=format)
        handle.close()
trackio/media/image.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ from pathlib import Path
4
+
5
+ import numpy as np
6
+ from PIL import Image as PILImage
7
+
8
+ from trackio.media.media import TrackioMedia
9
+
10
+ TrackioImageSourceType = str | Path | np.ndarray | PILImage.Image
11
+
12
+
13
class TrackioImage(TrackioMedia):
    """
    Initializes an Image object.

    Example:
        ```python
        import trackio
        import numpy as np
        from PIL import Image

        # Create an image from numpy array
        image_data = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
        trackio.log({"my_image": trackio.Image(image_data, caption="Random image")})

        # Create an image from PIL Image
        pil_image = Image.new('RGB', (100, 100), color='red')
        trackio.log({"red_image": trackio.Image(pil_image, caption="Red square")})

        # Create an image from file path
        trackio.log({"file_image": trackio.Image("path/to/image.jpg", caption="Photo from file")})
        ```

    Args:
        value (`str`, `Path`, `numpy.ndarray`, or `PIL.Image`, *optional*):
            A path to an image, a PIL Image, or a numpy array of shape
            (height, width, channels). If a numpy array, it should be of type
            `np.uint8` with RGB values in the range `[0, 255]`.
        caption (`str`, *optional*):
            A string caption for the image.
    """

    TYPE = "trackio.image"

    def __init__(self, value: TrackioImageSourceType, caption: str | None = None):
        super().__init__(value, caption)
        self._format: str | None = None

        if not isinstance(self._value, TrackioImageSourceType):
            raise ValueError(
                f"Invalid value type, expected {TrackioImageSourceType}, got {type(self._value)}"
            )
        if isinstance(self._value, np.ndarray) and self._value.dtype != np.uint8:
            raise ValueError(
                f"Invalid value dtype, expected np.uint8, got {self._value.dtype}"
            )
        # In-memory sources (arrays / PIL images) are always serialized as PNG.
        if (
            isinstance(self._value, np.ndarray | PILImage.Image)
            and self._format is None
        ):
            self._format = "png"

    def _as_pil(self) -> PILImage.Image | None:
        """Return the value as an RGBA PIL image, or None for path-like sources."""
        source = self._value
        try:
            if isinstance(source, np.ndarray):
                arr = np.asarray(source).astype("uint8")
                return PILImage.fromarray(arr).convert("RGBA")
            if isinstance(source, PILImage.Image):
                return source.convert("RGBA")
        except Exception as e:
            raise ValueError(f"Failed to process image data: {source}") from e
        return None

    def _save_media(self, file_path: Path):
        """Write the image to `file_path`: encode in-memory data, copy file sources."""
        pil = self._as_pil()
        if pil:
            pil.save(file_path, format=self._format)
            return
        if isinstance(self._value, str | Path):
            if not os.path.isfile(self._value):
                raise ValueError(f"File not found: {self._value}")
            shutil.copy(self._value, file_path)
trackio/media/media.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import uuid
3
+ from abc import ABC, abstractmethod
4
+ from pathlib import Path
5
+
6
+ from trackio.media.utils import get_project_media_path
7
+ from trackio.utils import MEDIA_DIR
8
+
9
+
10
+ class TrackioMedia(ABC):
11
+ """
12
+ Abstract base class for Trackio media objects
13
+ Provides shared functionality for file handling and serialization.
14
+ """
15
+
16
+ TYPE: str
17
+
18
+ def __init_subclass__(cls, **kwargs):
19
+ """Ensure subclasses define the TYPE attribute."""
20
+ super().__init_subclass__(**kwargs)
21
+ if not hasattr(cls, "TYPE") or cls.TYPE is None:
22
+ raise TypeError(f"Class {cls.__name__} must define TYPE attribute")
23
+
24
+ def __init__(self, value, caption: str | None = None):
25
+ """
26
+ Saves the value and caption, and if the value is a file path, checks if the file exists.
27
+ """
28
+ self.caption = caption
29
+ self._value = value
30
+ self._file_path: Path | None = None
31
+
32
+ if isinstance(self._value, str | Path):
33
+ if not os.path.isfile(self._value):
34
+ raise ValueError(f"File not found: {self._value}")
35
+
36
+ def _file_extension(self) -> str:
37
+ if self._file_path:
38
+ return self._file_path.suffix[1:].lower()
39
+ if isinstance(self._value, str | Path):
40
+ path = Path(self._value)
41
+ return path.suffix[1:].lower()
42
+ if hasattr(self, "_format") and self._format:
43
+ return self._format
44
+ return "unknown"
45
+
46
+ def _get_relative_file_path(self) -> Path | None:
47
+ return self._file_path
48
+
49
+ def _get_absolute_file_path(self) -> Path | None:
50
+ if self._file_path:
51
+ return MEDIA_DIR / self._file_path
52
+ return None
53
+
54
+ def _save(self, project: str, run: str, step: int = 0):
55
+ if self._file_path:
56
+ return
57
+
58
+ media_dir = get_project_media_path(project=project, run=run, step=step)
59
+ filename = f"{uuid.uuid4()}.{self._file_extension()}"
60
+ file_path = media_dir / filename
61
+
62
+ self._save_media(file_path)
63
+ self._file_path = file_path.relative_to(MEDIA_DIR)
64
+
65
+ @abstractmethod
66
+ def _save_media(self, file_path: Path):
67
+ """
68
+ Performs the actual media saving logic.
69
+ """
70
+ pass
71
+
72
+ def _to_dict(self) -> dict:
73
+ if not self._file_path:
74
+ raise ValueError("Media must be saved to file before serialization")
75
+ return {
76
+ "_type": self.TYPE,
77
+ "file_path": str(self._get_relative_file_path()),
78
+ "caption": self.caption,
79
+ }
trackio/media/utils.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import shutil
2
+ from pathlib import Path
3
+
4
+ from trackio.utils import MEDIA_DIR
5
+
6
+
7
+ def check_path(file_path: str | Path) -> None:
8
+ """Raise an error if the parent directory does not exist."""
9
+ file_path = Path(file_path)
10
+ if not file_path.parent.exists():
11
+ try:
12
+ file_path.parent.mkdir(parents=True, exist_ok=True)
13
+ except OSError as e:
14
+ raise ValueError(
15
+ f"Failed to create parent directory {file_path.parent}: {e}"
16
+ )
17
+
18
+
19
def check_ffmpeg_installed() -> None:
    """Raise an error if ffmpeg is not available on the system PATH."""
    ffmpeg_location = shutil.which("ffmpeg")
    if ffmpeg_location is not None:
        return
    raise RuntimeError(
        "ffmpeg is required to write video but was not found on your system. "
        "Please install ffmpeg and ensure it is available on your PATH."
    )
26
+
27
+
28
def get_project_media_path(
    project: str,
    run: str | None = None,
    step: int | None = None,
    relative_path: str | Path | None = None,
) -> Path:
    """
    Get the full path where uploaded files are stored for a Trackio project (and create the directory if it doesn't exist).
    If a run is not provided, the files are stored in a project-level directory with the given relative path.

    Args:
        project: The project name
        run: The run name
        step: The step number
        relative_path: The relative path within the directory (only used if run is not provided)

    Returns:
        The full path to the media file
    """
    if step is not None and run is None:
        raise ValueError("Uploading files at a specific step requires a run")

    target = MEDIA_DIR / project
    if run:
        target = target / run
        if step is not None:
            target = target / str(step)
    else:
        # Project-level uploads live under a "files" subdirectory.
        target = target / "files"
        if relative_path:
            target = target / relative_path
    target.mkdir(parents=True, exist_ok=True)
    return target
trackio/media/video.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ from pathlib import Path
5
+ from typing import Literal
6
+
7
+ import numpy as np
8
+
9
+ from trackio.media.media import TrackioMedia
10
+ from trackio.media.utils import check_ffmpeg_installed, check_path
11
+
12
+ TrackioVideoSourceType = str | Path | np.ndarray
13
+ TrackioVideoFormatType = Literal["gif", "mp4", "webm"]
14
+ VideoCodec = Literal["h264", "vp9", "gif"]
15
+
16
+
17
class TrackioVideo(TrackioMedia):
    """
    Initializes a Video object.

    Example:
        ```python
        import trackio
        import numpy as np

        # Create a simple video from numpy array
        frames = np.random.randint(0, 255, (10, 3, 64, 64), dtype=np.uint8)
        video = trackio.Video(frames, caption="Random video", fps=30)

        # Create a batch of videos
        batch_frames = np.random.randint(0, 255, (3, 10, 3, 64, 64), dtype=np.uint8)
        batch_video = trackio.Video(batch_frames, caption="Batch of videos", fps=15)

        # Create video from file path
        video = trackio.Video("path/to/video.mp4", caption="Video from file")
        ```

    Args:
        value (`str`, `Path`, or `numpy.ndarray`, *optional*):
            A path to a video file, or a numpy array.
            If numpy array, should be of type `np.uint8` with RGB values in the range `[0, 255]`.
            It is expected to have shape of either (frames, channels, height, width) or
            (batch, frames, channels, height, width). For the latter, the videos will be
            tiled into a grid.
        caption (`str`, *optional*):
            A string caption for the video.
        fps (`int`, *optional*):
            Frames per second for the video. Only used when value is an ndarray. Default is `24`.
        format (`Literal["gif", "mp4", "webm"]`, *optional*):
            Video format ("gif", "mp4", or "webm"). Only used when value is an ndarray. Default is "gif".
    """

    TYPE = "trackio.video"

    def __init__(
        self,
        value: TrackioVideoSourceType,
        caption: str | None = None,
        fps: int | None = None,
        format: TrackioVideoFormatType | None = None,
    ):
        super().__init__(value, caption)

        if not isinstance(self._value, TrackioVideoSourceType):
            raise ValueError(
                f"Invalid value type, expected {TrackioVideoSourceType}, got {type(self._value)}"
            )
        if isinstance(self._value, np.ndarray):
            if self._value.dtype != np.uint8:
                raise ValueError(
                    f"Invalid value dtype, expected np.uint8, got {self._value.dtype}"
                )
            # Defaults only apply to array sources; file sources keep their format.
            if format is None:
                format = "gif"
            if fps is None:
                fps = 24
        self._fps = fps
        self._format = format

    @staticmethod
    def _check_array_format(video: np.ndarray) -> None:
        """Raise an error if the array is not in the expected format."""
        if not (video.ndim == 4 and video.shape[-1] == 3):
            raise ValueError(
                f"Expected RGB input shaped (F, H, W, 3), got {video.shape}. "
                f"Input has {video.ndim} dimensions, expected 4."
            )
        if video.dtype != np.uint8:
            raise TypeError(
                f"Expected dtype=uint8, got {video.dtype}. "
                "Please convert your video data to uint8 format."
            )

    @staticmethod
    def write_video(
        file_path: str | Path, video: np.ndarray, fps: float, codec: VideoCodec
    ) -> None:
        """RGB uint8 only, shape (F, H, W, 3)."""
        check_ffmpeg_installed()
        check_path(file_path)

        if codec not in {"h264", "vp9", "gif"}:
            raise ValueError("Unsupported codec. Use h264, vp9, or gif.")

        arr = np.asarray(video)
        TrackioVideo._check_array_format(arr)

        frames = np.ascontiguousarray(arr)
        _, height, width, _ = frames.shape
        destination = str(file_path)

        # Feed raw RGB frames to ffmpeg over stdin.
        cmd = [
            "ffmpeg",
            "-y",
            "-f",
            "rawvideo",
            "-s",
            f"{width}x{height}",
            "-pix_fmt",
            "rgb24",
            "-r",
            str(fps),
            "-i",
            "-",
            "-an",
        ]

        if codec == "gif":
            # Two-pass palette generation for better GIF quality.
            cmd += [
                "-vf",
                "split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse",
                "-loop",
                "0",
            ]
        elif codec == "h264":
            cmd += [
                "-vcodec",
                "libx264",
                "-pix_fmt",
                "yuv420p",
                "-movflags",
                "+faststart",
            ]
        elif codec == "vp9":
            # Rough bits-per-pixel heuristic to pick a target bitrate.
            bpp = 0.08
            bps = int(width * height * fps * bpp)
            if bps >= 1_000_000:
                bitrate = f"{round(bps / 1_000_000)}M"
            elif bps >= 1_000:
                bitrate = f"{round(bps / 1_000)}k"
            else:
                bitrate = str(max(bps, 1))
            cmd += [
                "-vcodec",
                "libvpx-vp9",
                "-b:v",
                bitrate,
                "-pix_fmt",
                "yuv420p",
            ]
        cmd += [destination]

        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            for frame in frames:
                proc.stdin.write(frame.tobytes())
        finally:
            if proc.stdin:
                proc.stdin.close()
            err_text = (
                proc.stderr.read().decode("utf-8", errors="ignore")
                if proc.stderr
                else ""
            )
            exit_code = proc.wait()
            if exit_code != 0:
                raise RuntimeError(f"ffmpeg failed with code {exit_code}\n{err_text}")

    @property
    def _codec(self) -> str:
        # Map the container format to the encoder used for it.
        if self._format == "gif":
            return "gif"
        if self._format == "mp4":
            return "h264"
        if self._format == "webm":
            return "vp9"
        raise ValueError(f"Unsupported format: {self._format}")

    def _save_media(self, file_path: Path):
        """Write the video to `file_path`: encode arrays, copy file sources."""
        if isinstance(self._value, np.ndarray):
            prepared = TrackioVideo._process_ndarray(self._value)
            TrackioVideo.write_video(
                file_path, prepared, fps=self._fps, codec=self._codec
            )
        elif isinstance(self._value, str | Path):
            if not os.path.isfile(self._value):
                raise ValueError(f"File not found: {self._value}")
            shutil.copy(self._value, file_path)

    @staticmethod
    def _process_ndarray(value: np.ndarray) -> np.ndarray:
        # Accept 4D (single video) or 5D (batched videos):
        # (frames, channels, height, width) or (batch, frames, channels, height, width)
        if value.ndim < 4:
            raise ValueError(
                "Video requires at least 4 dimensions (frames, channels, height, width)"
            )
        if value.ndim > 5:
            raise ValueError(
                "Videos can have at most 5 dimensions (batch, frames, channels, height, width)"
            )
        if value.ndim == 4:
            # Promote to a single-element batch so tiling handles both cases.
            value = value[np.newaxis, ...]
        return TrackioVideo._tile_batched_videos(value)

    @staticmethod
    def _tile_batched_videos(video: np.ndarray) -> np.ndarray:
        """
        Tiles a batch of videos into a grid of videos.

        Input format: (batch, frames, channels, height, width) - original FCHW format
        Output format: (frames, total_height, total_width, channels)
        """
        batch_size, frames, channels, height, width = video.shape

        # Pad the batch up to the next power of two so it factors into a grid.
        target = 1 << (batch_size - 1).bit_length()
        if batch_size != target:
            filler = np.zeros(
                (target - batch_size, frames, channels, height, width),
                dtype=video.dtype,
            )
            video = np.concatenate((video, filler), axis=0)
            batch_size = target

        n_rows = 1 << ((batch_size.bit_length() - 1) // 2)
        n_cols = batch_size // n_rows

        # Grid layout: (n_rows, n_cols, frames, channels, height, width)
        video = video.reshape(n_rows, n_cols, frames, channels, height, width)
        # Rearrange to (frames, total_height, total_width, channels)
        video = video.transpose(2, 0, 4, 1, 5, 3)
        return video.reshape(frames, n_rows * height, n_cols * width, channels)
trackio/package.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "name": "trackio",
3
+ "version": "0.20.0",
4
+ "description": "",
5
+ "python": "true"
6
+ }
trackio/py.typed ADDED
File without changes
trackio/remote_client.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from gradio_client import Client
4
+
5
+
6
+ class RemoteClient:
7
+ def __init__(self, space: str, hf_token: str | None = None):
8
+ self._space = space
9
+ kwargs: dict = {"verbose": False}
10
+ if hf_token:
11
+ kwargs["hf_token"] = hf_token
12
+ try:
13
+ self._client = Client(space, **kwargs)
14
+ except Exception as e:
15
+ raise ConnectionError(
16
+ f"Could not connect to Space '{space}'. Is it running?\n{e}"
17
+ )
18
+
19
+ def predict(self, *args, api_name: str):
20
+ try:
21
+ return self._client.predict(*args, api_name=api_name)
22
+ except Exception as e:
23
+ if "API Not Found" in str(e) or "api_name" in str(e):
24
+ raise RuntimeError(
25
+ f"Space '{self._space}' does not support '{api_name}'. "
26
+ "Redeploy with `trackio sync`."
27
+ )
28
+ raise
trackio/run.py ADDED
@@ -0,0 +1,739 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import threading
4
+ import uuid
5
+ import warnings
6
+ from datetime import datetime, timezone
7
+ from pathlib import Path
8
+
9
+ import huggingface_hub
10
+ from gradio_client import Client, handle_file
11
+
12
+ from trackio import utils
13
+ from trackio.alerts import (
14
+ AlertLevel,
15
+ format_alert_terminal,
16
+ resolve_webhook_min_level,
17
+ send_webhook,
18
+ should_send_webhook,
19
+ )
20
+ from trackio.apple_gpu import AppleGpuMonitor, apple_gpu_available
21
+ from trackio.gpu import GpuMonitor, gpu_available
22
+ from trackio.histogram import Histogram
23
+ from trackio.markdown import Markdown
24
+ from trackio.media import TrackioMedia, get_project_media_path
25
+ from trackio.sqlite_storage import SQLiteStorage
26
+ from trackio.table import Table
27
+ from trackio.typehints import AlertEntry, LogEntry, SystemLogEntry, UploadEntry
28
+ from trackio.utils import _get_default_namespace
29
+
30
+ BATCH_SEND_INTERVAL = 0.5
31
+ MAX_BACKOFF = 30
32
+
33
+
34
class Run:
    """A single Trackio run.

    Buffers metric, system-metric, alert, and media-upload entries in memory
    and flushes them from a daemon thread — directly into SQLite when running
    locally, or via a gradio_client connection when logging to an HF Space
    (with local SQLite buffering as a fallback when the Space is unreachable).
    """

    def __init__(
        self,
        url: str | None,
        project: str,
        client: Client | None,
        name: str | None = None,
        group: str | None = None,
        config: dict | None = None,
        space_id: str | None = None,
        auto_log_gpu: bool = False,
        gpu_log_interval: float = 10.0,
        webhook_url: str | None = None,
        webhook_min_level: AlertLevel | str | None = None,
    ):
        """
        Initialize a Run for logging metrics to Trackio.

        Args:
            url: The URL of the Trackio server (local Gradio app or HF Space).
            project: The name of the project to log metrics to.
            client: A pre-configured gradio_client.Client instance, or None to
                create one automatically in a background thread with retry logic.
                Passing None is recommended for normal usage. Passing a client
                is useful for testing (e.g., injecting a mock client).
            name: The name of this run. If None, a readable name like
                "brave-sunset-0" is auto-generated. If space_id is provided,
                generates a "username-timestamp" format instead.
            group: Optional group name to organize related runs together.
            config: A dictionary of configuration/hyperparameters for this run.
                Keys starting with '_' are reserved for internal use.
            space_id: The HF Space ID if logging to a Space (e.g., "user/space").
                If provided, media files will be uploaded to the Space.
            auto_log_gpu: Whether to automatically log GPU metrics (utilization,
                memory, temperature) at regular intervals.
            gpu_log_interval: The interval in seconds between GPU metric logs.
                Only used when auto_log_gpu is True.
            webhook_url: A webhook URL to POST alert payloads to. Supports
                Slack and Discord webhook URLs natively. Can also be set via
                the TRACKIO_WEBHOOK_URL environment variable.
            webhook_min_level: Minimum alert level that should trigger webhook
                delivery. For example, `AlertLevel.WARN` sends only WARN and
                ERROR alerts to webhook destinations. Can also be set via
                `TRACKIO_WEBHOOK_MIN_LEVEL`.
        """
        self.url = url
        self.project = project
        self._client_lock = threading.Lock()
        self._client_thread = None
        self._client = client
        self._space_id = space_id
        self.name = name or utils.generate_readable_name(
            SQLiteStorage.get_runs(project), space_id
        )
        self.group = group
        self.config = utils.to_json_safe(config or {})

        # Reject user config keys with a leading underscore: that prefix is
        # reserved for the internal fields set just below.
        if isinstance(self.config, dict):
            for key in self.config:
                if key.startswith("_"):
                    raise ValueError(
                        f"Config key '{key}' is reserved (keys starting with '_' are reserved for internal use)"
                    )

        self.config["_Username"] = self._get_username()
        self.config["_Created"] = datetime.now(timezone.utc).isoformat()
        self.config["_Group"] = self.group

        # In-memory queues drained by the background sender thread.
        self._queued_logs: list[LogEntry] = []
        self._queued_system_logs: list[SystemLogEntry] = []
        self._queued_uploads: list[UploadEntry] = []
        self._queued_alerts: list[AlertEntry] = []
        self._stop_flag = threading.Event()
        self._config_logged = False
        # Resume step numbering if this run already has logged steps.
        max_step = SQLiteStorage.get_max_step_for_run(self.project, self.name)
        self._next_step = 0 if max_step is None else max_step + 1
        self._has_local_buffer = False

        self._is_local = space_id is None
        self._webhook_url = webhook_url or os.environ.get("TRACKIO_WEBHOOK_URL")
        self._webhook_min_level = resolve_webhook_min_level(
            webhook_min_level or os.environ.get("TRACKIO_WEBHOOK_MIN_LEVEL")
        )

        if self._is_local:
            # Local mode: a daemon thread writes batches straight to SQLite.
            self._local_sender_thread = threading.Thread(
                target=self._local_batch_sender
            )
            self._local_sender_thread.daemon = True
            self._local_sender_thread.start()
        else:
            # Space mode: connect (with retries) in the background, then send.
            self._client_thread = threading.Thread(target=self._init_client_background)
            self._client_thread.daemon = True
            self._client_thread.start()

        self._gpu_monitor: "GpuMonitor | AppleGpuMonitor | None" = None
        if auto_log_gpu:
            # Prefer NVIDIA monitoring; fall back to Apple Silicon GPUs.
            if gpu_available():
                self._gpu_monitor = GpuMonitor(self, interval=gpu_log_interval)
                self._gpu_monitor.start()
            elif apple_gpu_available():
                self._gpu_monitor = AppleGpuMonitor(self, interval=gpu_log_interval)
                self._gpu_monitor.start()
137
+
138
+ def _get_username(self) -> str | None:
139
+ try:
140
+ return _get_default_namespace()
141
+ except Exception:
142
+ return None
143
+
144
+ def _local_batch_sender(self):
145
+ while (
146
+ not self._stop_flag.is_set()
147
+ or len(self._queued_logs) > 0
148
+ or len(self._queued_system_logs) > 0
149
+ or len(self._queued_alerts) > 0
150
+ ):
151
+ if not self._stop_flag.is_set():
152
+ self._stop_flag.wait(timeout=BATCH_SEND_INTERVAL)
153
+
154
+ with self._client_lock:
155
+ if self._queued_logs:
156
+ logs_to_send = self._queued_logs.copy()
157
+ self._queued_logs.clear()
158
+ self._write_logs_to_sqlite(logs_to_send)
159
+
160
+ if self._queued_system_logs:
161
+ system_logs_to_send = self._queued_system_logs.copy()
162
+ self._queued_system_logs.clear()
163
+ self._write_system_logs_to_sqlite(system_logs_to_send)
164
+
165
+ if self._queued_alerts:
166
+ alerts_to_send = self._queued_alerts.copy()
167
+ self._queued_alerts.clear()
168
+ self._write_alerts_to_sqlite(alerts_to_send)
169
+
170
+ def _write_logs_to_sqlite(self, logs: list[LogEntry]):
171
+ logs_by_run: dict[tuple, dict] = {}
172
+ for entry in logs:
173
+ key = (entry["project"], entry["run"])
174
+ if key not in logs_by_run:
175
+ logs_by_run[key] = {
176
+ "metrics": [],
177
+ "steps": [],
178
+ "log_ids": [],
179
+ "config": None,
180
+ }
181
+ logs_by_run[key]["metrics"].append(entry["metrics"])
182
+ logs_by_run[key]["steps"].append(entry.get("step"))
183
+ logs_by_run[key]["log_ids"].append(entry.get("log_id"))
184
+ if entry.get("config") and logs_by_run[key]["config"] is None:
185
+ logs_by_run[key]["config"] = entry["config"]
186
+
187
+ for (project, run), data in logs_by_run.items():
188
+ has_log_ids = any(lid is not None for lid in data["log_ids"])
189
+ SQLiteStorage.bulk_log(
190
+ project=project,
191
+ run=run,
192
+ metrics_list=data["metrics"],
193
+ steps=data["steps"],
194
+ config=data["config"],
195
+ log_ids=data["log_ids"] if has_log_ids else None,
196
+ )
197
+
198
+ def _write_system_logs_to_sqlite(self, logs: list[SystemLogEntry]):
199
+ logs_by_run: dict[tuple, dict] = {}
200
+ for entry in logs:
201
+ key = (entry["project"], entry["run"])
202
+ if key not in logs_by_run:
203
+ logs_by_run[key] = {"metrics": [], "timestamps": [], "log_ids": []}
204
+ logs_by_run[key]["metrics"].append(entry["metrics"])
205
+ logs_by_run[key]["timestamps"].append(entry.get("timestamp"))
206
+ logs_by_run[key]["log_ids"].append(entry.get("log_id"))
207
+
208
+ for (project, run), data in logs_by_run.items():
209
+ has_log_ids = any(lid is not None for lid in data["log_ids"])
210
+ SQLiteStorage.bulk_log_system(
211
+ project=project,
212
+ run=run,
213
+ metrics_list=data["metrics"],
214
+ timestamps=data["timestamps"],
215
+ log_ids=data["log_ids"] if has_log_ids else None,
216
+ )
217
+
218
+ def _write_alerts_to_sqlite(self, alerts: list[AlertEntry]):
219
+ alerts_by_run: dict[tuple, dict] = {}
220
+ for entry in alerts:
221
+ key = (entry["project"], entry["run"])
222
+ if key not in alerts_by_run:
223
+ alerts_by_run[key] = {
224
+ "titles": [],
225
+ "texts": [],
226
+ "levels": [],
227
+ "steps": [],
228
+ "timestamps": [],
229
+ "alert_ids": [],
230
+ }
231
+ alerts_by_run[key]["titles"].append(entry["title"])
232
+ alerts_by_run[key]["texts"].append(entry.get("text"))
233
+ alerts_by_run[key]["levels"].append(entry["level"])
234
+ alerts_by_run[key]["steps"].append(entry.get("step"))
235
+ alerts_by_run[key]["timestamps"].append(entry.get("timestamp"))
236
+ alerts_by_run[key]["alert_ids"].append(entry.get("alert_id"))
237
+
238
+ for (project, run), data in alerts_by_run.items():
239
+ has_alert_ids = any(aid is not None for aid in data["alert_ids"])
240
+ SQLiteStorage.bulk_alert(
241
+ project=project,
242
+ run=run,
243
+ titles=data["titles"],
244
+ texts=data["texts"],
245
+ levels=data["levels"],
246
+ steps=data["steps"],
247
+ timestamps=data["timestamps"],
248
+ alert_ids=data["alert_ids"] if has_alert_ids else None,
249
+ )
250
+
251
+ def _batch_sender(self):
252
+ consecutive_failures = 0
253
+ while (
254
+ not self._stop_flag.is_set()
255
+ or len(self._queued_logs) > 0
256
+ or len(self._queued_system_logs) > 0
257
+ or len(self._queued_uploads) > 0
258
+ or len(self._queued_alerts) > 0
259
+ or self._has_local_buffer
260
+ ):
261
+ if not self._stop_flag.is_set():
262
+ if consecutive_failures:
263
+ sleep_time = min(
264
+ BATCH_SEND_INTERVAL * (2**consecutive_failures), MAX_BACKOFF
265
+ )
266
+ else:
267
+ sleep_time = BATCH_SEND_INTERVAL
268
+ self._stop_flag.wait(timeout=sleep_time)
269
+ elif self._has_local_buffer:
270
+ self._stop_flag.wait(timeout=BATCH_SEND_INTERVAL)
271
+
272
+ with self._client_lock:
273
+ if self._client is None:
274
+ if self._stop_flag.is_set():
275
+ if self._queued_logs:
276
+ self._persist_logs_locally(self._queued_logs)
277
+ self._queued_logs.clear()
278
+ if self._queued_system_logs:
279
+ self._persist_system_logs_locally(self._queued_system_logs)
280
+ self._queued_system_logs.clear()
281
+ if self._queued_uploads:
282
+ self._persist_uploads_locally(self._queued_uploads)
283
+ self._queued_uploads.clear()
284
+ if self._queued_alerts:
285
+ self._write_alerts_to_sqlite(self._queued_alerts)
286
+ self._queued_alerts.clear()
287
+ return
288
+
289
+ failed = False
290
+
291
+ if self._queued_logs:
292
+ logs_to_send = self._queued_logs.copy()
293
+ self._queued_logs.clear()
294
+ try:
295
+ self._client.predict(
296
+ api_name="/bulk_log",
297
+ logs=logs_to_send,
298
+ hf_token=huggingface_hub.utils.get_token(),
299
+ )
300
+ except Exception:
301
+ self._persist_logs_locally(logs_to_send)
302
+ failed = True
303
+
304
+ if self._queued_system_logs:
305
+ system_logs_to_send = self._queued_system_logs.copy()
306
+ self._queued_system_logs.clear()
307
+ try:
308
+ self._client.predict(
309
+ api_name="/bulk_log_system",
310
+ logs=system_logs_to_send,
311
+ hf_token=huggingface_hub.utils.get_token(),
312
+ )
313
+ except Exception:
314
+ self._persist_system_logs_locally(system_logs_to_send)
315
+ failed = True
316
+
317
+ if self._queued_uploads:
318
+ uploads_to_send = self._queued_uploads.copy()
319
+ self._queued_uploads.clear()
320
+ try:
321
+ self._client.predict(
322
+ api_name="/bulk_upload_media",
323
+ uploads=uploads_to_send,
324
+ hf_token=huggingface_hub.utils.get_token(),
325
+ )
326
+ except Exception:
327
+ self._persist_uploads_locally(uploads_to_send)
328
+ failed = True
329
+
330
+ if self._queued_alerts:
331
+ alerts_to_send = self._queued_alerts.copy()
332
+ self._queued_alerts.clear()
333
+ try:
334
+ self._client.predict(
335
+ api_name="/bulk_alert",
336
+ alerts=alerts_to_send,
337
+ hf_token=huggingface_hub.utils.get_token(),
338
+ )
339
+ except Exception:
340
+ self._write_alerts_to_sqlite(alerts_to_send)
341
+ failed = True
342
+
343
+ if failed:
344
+ consecutive_failures += 1
345
+ else:
346
+ consecutive_failures = 0
347
+ if self._has_local_buffer:
348
+ self._flush_local_buffer()
349
+
350
+ def _persist_logs_locally(self, logs: list[LogEntry]):
351
+ if not self._space_id:
352
+ return
353
+ logs_by_run: dict[tuple, dict] = {}
354
+ for entry in logs:
355
+ key = (entry["project"], entry["run"])
356
+ if key not in logs_by_run:
357
+ logs_by_run[key] = {
358
+ "metrics": [],
359
+ "steps": [],
360
+ "log_ids": [],
361
+ "config": None,
362
+ }
363
+ logs_by_run[key]["metrics"].append(entry["metrics"])
364
+ logs_by_run[key]["steps"].append(entry.get("step"))
365
+ logs_by_run[key]["log_ids"].append(entry.get("log_id"))
366
+ if entry.get("config") and logs_by_run[key]["config"] is None:
367
+ logs_by_run[key]["config"] = entry["config"]
368
+
369
+ for (project, run), data in logs_by_run.items():
370
+ SQLiteStorage.bulk_log(
371
+ project=project,
372
+ run=run,
373
+ metrics_list=data["metrics"],
374
+ steps=data["steps"],
375
+ log_ids=data["log_ids"],
376
+ config=data["config"],
377
+ space_id=self._space_id,
378
+ )
379
+ self._has_local_buffer = True
380
+
381
+ def _persist_system_logs_locally(self, logs: list[SystemLogEntry]):
382
+ if not self._space_id:
383
+ return
384
+ logs_by_run: dict[tuple, dict] = {}
385
+ for entry in logs:
386
+ key = (entry["project"], entry["run"])
387
+ if key not in logs_by_run:
388
+ logs_by_run[key] = {"metrics": [], "timestamps": [], "log_ids": []}
389
+ logs_by_run[key]["metrics"].append(entry["metrics"])
390
+ logs_by_run[key]["timestamps"].append(entry.get("timestamp"))
391
+ logs_by_run[key]["log_ids"].append(entry.get("log_id"))
392
+
393
+ for (project, run), data in logs_by_run.items():
394
+ SQLiteStorage.bulk_log_system(
395
+ project=project,
396
+ run=run,
397
+ metrics_list=data["metrics"],
398
+ timestamps=data["timestamps"],
399
+ log_ids=data["log_ids"],
400
+ space_id=self._space_id,
401
+ )
402
+ self._has_local_buffer = True
403
+
404
+ def _persist_uploads_locally(self, uploads: list[UploadEntry]):
405
+ if not self._space_id:
406
+ return
407
+ for entry in uploads:
408
+ file_data = entry.get("uploaded_file")
409
+ file_path = ""
410
+ if isinstance(file_data, dict):
411
+ file_path = file_data.get("path", "")
412
+ elif hasattr(file_data, "path"):
413
+ file_path = str(file_data.path)
414
+ else:
415
+ file_path = str(file_data)
416
+ SQLiteStorage.add_pending_upload(
417
+ project=entry["project"],
418
+ space_id=self._space_id,
419
+ run_name=entry.get("run"),
420
+ step=entry.get("step"),
421
+ file_path=file_path,
422
+ relative_path=entry.get("relative_path"),
423
+ )
424
+ self._has_local_buffer = True
425
+
426
+ def _flush_local_buffer(self):
427
+ try:
428
+ buffered_logs = SQLiteStorage.get_pending_logs(self.project)
429
+ if buffered_logs:
430
+ self._client.predict(
431
+ api_name="/bulk_log",
432
+ logs=buffered_logs["logs"],
433
+ hf_token=huggingface_hub.utils.get_token(),
434
+ )
435
+ SQLiteStorage.clear_pending_logs(self.project, buffered_logs["ids"])
436
+
437
+ buffered_sys = SQLiteStorage.get_pending_system_logs(self.project)
438
+ if buffered_sys:
439
+ self._client.predict(
440
+ api_name="/bulk_log_system",
441
+ logs=buffered_sys["logs"],
442
+ hf_token=huggingface_hub.utils.get_token(),
443
+ )
444
+ SQLiteStorage.clear_pending_system_logs(
445
+ self.project, buffered_sys["ids"]
446
+ )
447
+
448
+ buffered_uploads = SQLiteStorage.get_pending_uploads(self.project)
449
+ if buffered_uploads:
450
+ upload_entries = []
451
+ for u in buffered_uploads["uploads"]:
452
+ fp = u["file_path"]
453
+ if Path(fp).exists():
454
+ upload_entries.append(
455
+ {
456
+ "project": u["project"],
457
+ "run": u["run"],
458
+ "step": u["step"],
459
+ "relative_path": u["relative_path"],
460
+ "uploaded_file": handle_file(fp),
461
+ }
462
+ )
463
+ if upload_entries:
464
+ self._client.predict(
465
+ api_name="/bulk_upload_media",
466
+ uploads=upload_entries,
467
+ hf_token=huggingface_hub.utils.get_token(),
468
+ )
469
+ SQLiteStorage.clear_pending_uploads(
470
+ self.project, buffered_uploads["ids"]
471
+ )
472
+
473
+ self._has_local_buffer = False
474
+ except Exception:
475
+ pass
476
+
477
+ def _init_client_background(self):
478
+ if self._client is None:
479
+ fib = utils.fibo()
480
+ for sleep_coefficient in fib:
481
+ if self._stop_flag.is_set():
482
+ break
483
+ try:
484
+ client = Client(self.url, verbose=False)
485
+
486
+ with self._client_lock:
487
+ self._client = client
488
+ break
489
+ except Exception:
490
+ pass
491
+ sleep_time = min(0.1 * sleep_coefficient, MAX_BACKOFF)
492
+ self._stop_flag.wait(timeout=sleep_time)
493
+
494
+ self._batch_sender()
495
+
496
+ def _queue_upload(
497
+ self,
498
+ file_path,
499
+ step: int | None,
500
+ relative_path: str | None = None,
501
+ use_run_name: bool = True,
502
+ ):
503
+ if self._is_local:
504
+ self._save_upload_locally(file_path, step, relative_path, use_run_name)
505
+ else:
506
+ upload_entry: UploadEntry = {
507
+ "project": self.project,
508
+ "run": self.name if use_run_name else None,
509
+ "step": step,
510
+ "relative_path": relative_path,
511
+ "uploaded_file": handle_file(file_path),
512
+ }
513
+ with self._client_lock:
514
+ self._queued_uploads.append(upload_entry)
515
+
516
+ def _save_upload_locally(
517
+ self,
518
+ file_path,
519
+ step: int | None,
520
+ relative_path: str | None = None,
521
+ use_run_name: bool = True,
522
+ ):
523
+ media_path = get_project_media_path(
524
+ project=self.project,
525
+ run=self.name if use_run_name else None,
526
+ step=step,
527
+ relative_path=relative_path,
528
+ )
529
+ src = Path(file_path)
530
+ if src.exists() and str(src.resolve()) != str(Path(media_path).resolve()):
531
+ shutil.copy(str(src), str(media_path))
532
+
533
+ def _process_media(self, value: TrackioMedia, step: int | None) -> dict:
534
+ value._save(self.project, self.name, step if step is not None else 0)
535
+ if self._space_id:
536
+ self._queue_upload(value._get_absolute_file_path(), step)
537
+ return value._to_dict()
538
+
539
+ def _scan_and_queue_media_uploads(self, table_dict: dict, step: int | None):
540
+ if not self._space_id:
541
+ return
542
+
543
+ table_data = table_dict.get("_value", [])
544
+ for row in table_data:
545
+ for value in row.values():
546
+ if isinstance(value, dict) and value.get("_type") in [
547
+ "trackio.image",
548
+ "trackio.video",
549
+ "trackio.audio",
550
+ ]:
551
+ file_path = value.get("file_path")
552
+ if file_path:
553
+ from trackio.utils import MEDIA_DIR
554
+
555
+ absolute_path = MEDIA_DIR / file_path
556
+ self._queue_upload(absolute_path, step)
557
+ elif isinstance(value, list):
558
+ for item in value:
559
+ if isinstance(item, dict) and item.get("_type") in [
560
+ "trackio.image",
561
+ "trackio.video",
562
+ "trackio.audio",
563
+ ]:
564
+ file_path = item.get("file_path")
565
+ if file_path:
566
+ from trackio.utils import MEDIA_DIR
567
+
568
+ absolute_path = MEDIA_DIR / file_path
569
+ self._queue_upload(absolute_path, step)
570
+
571
+ def _ensure_sender_alive(self):
572
+ if self._is_local:
573
+ if (
574
+ hasattr(self, "_local_sender_thread")
575
+ and not self._local_sender_thread.is_alive()
576
+ and not self._stop_flag.is_set()
577
+ ):
578
+ self._local_sender_thread = threading.Thread(
579
+ target=self._local_batch_sender
580
+ )
581
+ self._local_sender_thread.daemon = True
582
+ self._local_sender_thread.start()
583
+ else:
584
+ if (
585
+ self._client_thread is not None
586
+ and not self._client_thread.is_alive()
587
+ and not self._stop_flag.is_set()
588
+ ):
589
+ self._client_thread = threading.Thread(
590
+ target=self._init_client_background
591
+ )
592
+ self._client_thread.daemon = True
593
+ self._client_thread.start()
594
+
595
+ def log(self, metrics: dict, step: int | None = None):
596
+ renamed_keys = []
597
+ new_metrics = {}
598
+
599
+ for k, v in metrics.items():
600
+ if k in utils.RESERVED_KEYS or k.startswith("__"):
601
+ new_key = f"__{k}"
602
+ renamed_keys.append(k)
603
+ new_metrics[new_key] = v
604
+ else:
605
+ new_metrics[k] = v
606
+
607
+ if renamed_keys:
608
+ warnings.warn(f"Reserved keys renamed: {renamed_keys} → '__{{key}}'")
609
+
610
+ metrics = new_metrics
611
+ for key, value in metrics.items():
612
+ if isinstance(value, Table):
613
+ metrics[key] = value._to_dict(
614
+ project=self.project, run=self.name, step=step
615
+ )
616
+ self._scan_and_queue_media_uploads(metrics[key], step)
617
+ elif isinstance(value, Histogram):
618
+ metrics[key] = value._to_dict()
619
+ elif isinstance(value, Markdown):
620
+ metrics[key] = value._to_dict()
621
+ elif isinstance(value, TrackioMedia):
622
+ metrics[key] = self._process_media(value, step)
623
+ metrics = utils.serialize_values(metrics)
624
+
625
+ if step is None:
626
+ step = self._next_step
627
+ self._next_step = max(self._next_step, step + 1)
628
+
629
+ config_to_log = None
630
+ if not self._config_logged and self.config:
631
+ config_to_log = utils.to_json_safe(self.config)
632
+ self._config_logged = True
633
+
634
+ log_entry: LogEntry = {
635
+ "project": self.project,
636
+ "run": self.name,
637
+ "metrics": metrics,
638
+ "step": step,
639
+ "config": config_to_log,
640
+ "log_id": uuid.uuid4().hex,
641
+ }
642
+
643
+ with self._client_lock:
644
+ self._queued_logs.append(log_entry)
645
+ self._ensure_sender_alive()
646
+
647
+ def alert(
648
+ self,
649
+ title: str,
650
+ text: str | None = None,
651
+ level: AlertLevel = AlertLevel.WARN,
652
+ step: int | None = None,
653
+ webhook_url: str | None = None,
654
+ ):
655
+ if step is None:
656
+ step = max(self._next_step - 1, 0)
657
+ timestamp = datetime.now(timezone.utc).isoformat()
658
+
659
+ print(format_alert_terminal(level, title, text, step))
660
+
661
+ alert_entry: AlertEntry = {
662
+ "project": self.project,
663
+ "run": self.name,
664
+ "title": title,
665
+ "text": text,
666
+ "level": level.value,
667
+ "step": step,
668
+ "timestamp": timestamp,
669
+ "alert_id": uuid.uuid4().hex,
670
+ }
671
+
672
+ with self._client_lock:
673
+ self._queued_alerts.append(alert_entry)
674
+ self._ensure_sender_alive()
675
+
676
+ url = webhook_url or self._webhook_url
677
+ if url and should_send_webhook(level, self._webhook_min_level):
678
+ t = threading.Thread(
679
+ target=send_webhook,
680
+ args=(
681
+ url,
682
+ level,
683
+ title,
684
+ text,
685
+ self.project,
686
+ self.name,
687
+ step,
688
+ timestamp,
689
+ ),
690
+ daemon=True,
691
+ )
692
+ t.start()
693
+
694
+ def log_system(self, metrics: dict):
695
+ metrics = utils.serialize_values(metrics)
696
+ timestamp = datetime.now(timezone.utc).isoformat()
697
+
698
+ system_log_entry: SystemLogEntry = {
699
+ "project": self.project,
700
+ "run": self.name,
701
+ "metrics": metrics,
702
+ "timestamp": timestamp,
703
+ "log_id": uuid.uuid4().hex,
704
+ }
705
+
706
+ with self._client_lock:
707
+ self._queued_system_logs.append(system_log_entry)
708
+ self._ensure_sender_alive()
709
+
710
+ def finish(self):
711
+ if self._gpu_monitor is not None:
712
+ self._gpu_monitor.stop()
713
+
714
+ self._stop_flag.set()
715
+
716
+ if self._is_local:
717
+ if hasattr(self, "_local_sender_thread"):
718
+ print("* Run finished. Uploading logs to Trackio (please wait...)")
719
+ self._local_sender_thread.join(timeout=30)
720
+ if self._local_sender_thread.is_alive():
721
+ warnings.warn(
722
+ "Could not flush all logs within 30s. Some data may be buffered locally."
723
+ )
724
+ else:
725
+ if self._client_thread is not None:
726
+ print(
727
+ "* Run finished. Uploading logs to Trackio Space (please wait...)"
728
+ )
729
+ self._client_thread.join(timeout=30)
730
+ if self._client_thread.is_alive():
731
+ warnings.warn(
732
+ "Could not flush all logs within 30s. Some data may be buffered locally."
733
+ )
734
+ if SQLiteStorage.has_pending_data(self.project):
735
+ warnings.warn(
736
+ f"* Some logs could not be sent to the Space (it may still be starting up). "
737
+ f"They have been saved locally and will be sent automatically next time you call: "
738
+ f'trackio.init(project="{self.project}", space_id="{self._space_id}")'
739
+ )
trackio/server.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The main API layer for the Trackio UI."""
2
+
3
+ import base64
4
+ import os
5
+ import re
6
+ import secrets
7
+ import shutil
8
+ import time
9
+ from functools import lru_cache
10
+ from typing import Any
11
+ from urllib.parse import urlencode
12
+
13
+ import gradio as gr
14
+ import httpx
15
+ import huggingface_hub as hf
16
+ from starlette.requests import Request
17
+ from starlette.responses import RedirectResponse
18
+
19
+ import trackio.utils as utils
20
+ from trackio.media import get_project_media_path
21
+ from trackio.sqlite_storage import SQLiteStorage
22
+ from trackio.typehints import AlertEntry, LogEntry, SystemLogEntry, UploadEntry
23
+
24
# Shared HfApi instance used by this server module.
HfApi = hf.HfApi()

# Random per-process token; presumably authorizes write API calls from the
# local client — confirm against the API routes that check it.
write_token = secrets.token_urlsafe(32)

OAUTH_CALLBACK_PATH = "/login/callback"
OAUTH_START_PATH = "/oauth/hf/start"
30
+
31
+
32
def _hf_access_token(request: gr.Request) -> str | None:
    """Extract an HF OAuth access token from the request, if present.

    Checks the in-memory OAuth session (via the ``x-trackio-oauth-session``
    header) first; an expired session is evicted. Falls back to the
    ``trackio_hf_access_token`` cookie. Returns None when neither yields a
    token.
    """
    try:
        session_id = request.headers.get("x-trackio-oauth-session")
    except (AttributeError, TypeError):
        session_id = None
    if session_id and session_id in _oauth_sessions:
        token, created = _oauth_sessions[session_id]
        if time.monotonic() - created <= _OAUTH_SESSION_TTL:
            return token
        # Session expired: drop it and fall through to the cookie.
        del _oauth_sessions[session_id]
    try:
        cookie_header = request.headers.get("cookie", "")
    except (AttributeError, TypeError):
        cookie_header = ""
    if cookie_header:
        for cookie in cookie_header.split(";"):
            name, sep, value = cookie.strip().partition("=")
            if sep and name == "trackio_hf_access_token":
                return value or None
    return None
54
+
55
+
56
def _oauth_redirect_uri(request: Request) -> str:
    """Compute the absolute OAuth callback URL for this deployment."""
    space_host = os.getenv("SPACE_HOST")
    if not space_host:
        # Local / non-Space deployment: derive from the incoming request.
        return str(request.base_url).rstrip("/") + OAUTH_CALLBACK_PATH
    # SPACE_HOST may be a comma-separated list; use the first host.
    primary_host = space_host.split(",")[0]
    return f"https://{primary_host}{OAUTH_CALLBACK_PATH}"
62
+
63
+
64
class TrackioServer(gr.Server):
    """gr.Server subclass whose close() tolerates a missing or already-stopped
    Blocks app."""

    def close(self, verbose: bool = True) -> None:
        # Nothing to do if the server was never attached to a Blocks app.
        if self.blocks is None:
            return
        if self.blocks.is_running:
            self.blocks.close(verbose=verbose)
70
+
71
+
72
_OAUTH_STATE_TTL = 86400  # seconds a pending OAuth state nonce stays valid (1 day)
_OAUTH_SESSION_TTL = 86400 * 30  # seconds an in-memory OAuth session lives (30 days)
# OAuth state nonce -> creation time (time.monotonic) for in-flight logins.
_pending_oauth_states: dict[str, float] = {}
# Session id -> (access token, creation time) for completed logins.
_oauth_sessions: dict[str, tuple[str, float]] = {}
76
+
77
+
78
def _evict_expired_oauth():
    """Drop expired OAuth state nonces and session tokens from memory."""
    now = time.monotonic()
    stale_states = [
        state
        for state, created in _pending_oauth_states.items()
        if now - created > _OAUTH_STATE_TTL
    ]
    for state in stale_states:
        del _pending_oauth_states[state]
    stale_sessions = [
        session
        for session, (_, created) in _oauth_sessions.items()
        if now - created > _OAUTH_SESSION_TTL
    ]
    for session in stale_sessions:
        del _oauth_sessions[session]
90
+
91
+
92
def oauth_hf_start(request: Request):
    """Kick off the HF OAuth authorization-code flow with a fresh state nonce.

    Redirects straight home if OAuth is not configured for this deployment.
    """
    client_id = os.getenv("OAUTH_CLIENT_ID")
    if not client_id:
        return RedirectResponse(url="/", status_code=302)
    _evict_expired_oauth()
    # Record a one-time state nonce for CSRF protection in the callback.
    state = secrets.token_urlsafe(32)
    _pending_oauth_states[state] = time.monotonic()
    params = {
        "client_id": client_id,
        "redirect_uri": _oauth_redirect_uri(request),
        "response_type": "code",
        "scope": os.getenv("OAUTH_SCOPES", "openid profile").strip(),
        "state": state,
    }
    authorize_url = "https://huggingface.co/oauth/authorize?" + urlencode(params)
    return RedirectResponse(url=authorize_url, status_code=302)
111
+
112
+
113
def oauth_hf_callback(request: Request):
    """Handle the HF OAuth redirect: validate the state nonce, exchange the
    authorization code for an access token, and set the session cookie.

    Any failure redirects to "/?oauth_error=1" instead of raising.
    """
    client_id = os.getenv("OAUTH_CLIENT_ID")
    client_secret = os.getenv("OAUTH_CLIENT_SECRET")
    err = "/?oauth_error=1"
    if not client_id or not client_secret:
        return RedirectResponse(url=err, status_code=302)
    got_state = request.query_params.get("state")
    code = request.query_params.get("code")
    # The state must match one we issued (CSRF protection).
    if not got_state or got_state not in _pending_oauth_states or not code:
        return RedirectResponse(url=err, status_code=302)
    state_created = _pending_oauth_states.pop(got_state)
    if time.monotonic() - state_created > _OAUTH_STATE_TTL:
        return RedirectResponse(url=err, status_code=302)
    redirect_uri = _oauth_redirect_uri(request)
    # The token endpoint uses HTTP Basic auth with client_id:client_secret.
    auth_b64 = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()
    try:
        with httpx.Client() as client:
            token_resp = client.post(
                "https://huggingface.co/oauth/token",
                headers={"Authorization": f"Basic {auth_b64}"},
                data={
                    "grant_type": "authorization_code",
                    "code": code,
                    "redirect_uri": redirect_uri,
                    "client_id": client_id,
                },
            )
            token_resp.raise_for_status()
            access_token = token_resp.json()["access_token"]
    except Exception:
        return RedirectResponse(url=err, status_code=302)
    session_id = secrets.token_urlsafe(32)
    _oauth_sessions[session_id] = (access_token, time.monotonic())
    on_spaces = os.getenv("SYSTEM") == "spaces"
    # The session id is also passed via query param — presumably so the
    # frontend can send it back in the x-trackio-oauth-session header when
    # cookies are blocked (e.g. inside an iframe); confirm against frontend.
    resp = RedirectResponse(url=f"/?oauth_session={session_id}", status_code=302)
    resp.set_cookie(
        key="trackio_hf_access_token",
        value=access_token,
        httponly=True,
        samesite="none" if on_spaces else "lax",  # cross-site context on Spaces
        max_age=86400 * 30,
        path="/",
        secure=on_spaces,
    )
    return resp
158
+
159
+
160
def oauth_logout(request: Request):
    """Clear the HF access-token cookie and redirect to the dashboard root."""
    running_on_spaces = os.getenv("SYSTEM") == "spaces"
    response = RedirectResponse(url="/", status_code=302)
    # Cookie attributes must match those used when the cookie was set,
    # otherwise browsers refuse to delete it.
    response.delete_cookie(
        "trackio_hf_access_token",
        path="/",
        samesite="none" if running_on_spaces else "lax",
        secure=running_on_spaces,
    )
    return response
170
+
171
+
172
@lru_cache(maxsize=32)
def check_hf_token_has_write_access(hf_token: str | None) -> None:
    """Raise PermissionError unless `hf_token` grants write access to this Space.

    Only enforced when running on Hugging Face Spaces (SYSTEM == "spaces");
    a no-op elsewhere. Results are cached per token (lru_cache), so a token
    revoked after its first successful check keeps passing until restart.
    """
    if os.getenv("SYSTEM") == "spaces":
        if hf_token is None:
            raise PermissionError(
                "Expected a HF_TOKEN to be provided when logging to a Space"
            )
        who = HfApi.whoami(hf_token)
        owner_name = os.getenv("SPACE_AUTHOR_NAME")
        repo_name = os.getenv("SPACE_REPO_NAME")
        orgs = [o["name"] for o in who["orgs"]]
        # The token's user must own the Space or belong to the owning org.
        if owner_name != who["name"] and owner_name not in orgs:
            raise PermissionError(
                "Expected the provided hf_token to be the user owner of the space, or be a member of the org owner of the space"
            )
        access_token = who["auth"]["accessToken"]
        if access_token["role"] == "fineGrained":
            # Fine-grained tokens must explicitly scope repo.write either to
            # this exact Space or to the owning user/org as a whole.
            matched = False
            for item in access_token["fineGrained"]["scoped"]:
                if (
                    item["entity"]["type"] == "space"
                    and item["entity"]["name"] == f"{owner_name}/{repo_name}"
                    and "repo.write" in item["permissions"]
                ):
                    matched = True
                    break
                if (
                    (
                        item["entity"]["type"] == "user"
                        or item["entity"]["type"] == "org"
                    )
                    and item["entity"]["name"] == owner_name
                    and "repo.write" in item["permissions"]
                ):
                    matched = True
                    break
            if not matched:
                raise PermissionError(
                    "Expected the provided hf_token with fine grained permissions to provide write access to the space"
                )
        elif access_token["role"] != "write":
            # Non-fine-grained tokens must carry the plain "write" role.
            raise PermissionError(
                "Expected the provided hf_token to provide write permissions"
            )
216
+
217
+
218
@lru_cache(maxsize=32)
def check_oauth_token_has_write_access(oauth_token: str | None) -> None:
    """Raise PermissionError unless the OAuth token belongs to the Space owner
    or to a member with the "write" role in the owning org.

    No-op outside of Hugging Face Spaces. Results are cached per token.
    """
    if os.getenv("SYSTEM") != "spaces":
        return
    if oauth_token is None:
        raise PermissionError(
            "Expected an oauth to be provided when logging to a Space"
        )
    who = HfApi.whoami(oauth_token)
    owner_name = os.getenv("SPACE_AUTHOR_NAME")
    if who["name"] == owner_name:
        return
    if any(
        org["name"] == owner_name and org["roleInOrg"] == "write"
        for org in who["orgs"]
    ):
        return
    raise PermissionError(
        "Expected the oauth token to be the user owner of the space, or be a member of the org owner of the space"
    )
237
+
238
+
239
def check_write_access(request: gr.Request, token: str) -> bool:
    """Return True if the request proves knowledge of the server's write token.

    The token may arrive as a "trackio_write_token" cookie (checked first; a
    present cookie never falls through to the query string) or as a
    "write_token" query parameter.

    Fix: comparisons now use secrets.compare_digest (constant-time) instead of
    `==`, so an attacker cannot recover the token via timing differences.
    """
    cookies = request.headers.get("cookie", "")
    if cookies:
        for cookie in cookies.split(";"):
            parts = cookie.strip().split("=", 1)
            if len(parts) == 2 and parts[0] == "trackio_write_token":
                # Compare as bytes so non-ASCII input cannot raise TypeError.
                return secrets.compare_digest(parts[1].encode(), token.encode())
    if hasattr(request, "query_params") and request.query_params:
        qp = request.query_params.get("write_token")
        return qp is not None and secrets.compare_digest(qp.encode(), token.encode())
    return False
250
+
251
+
252
def assert_can_mutate_runs(request: gr.Request) -> None:
    """Raise gr.Error unless the caller may delete or rename runs.

    Outside of Spaces everything is allowed. On Spaces the caller must either
    present an OAuth token with write access, or prove knowledge of the
    server's write_token (cookie or ?write_token= query parameter).
    """
    if os.getenv("SYSTEM") != "spaces":
        return
    hf_tok = _hf_access_token(request)
    if hf_tok is not None:
        try:
            check_oauth_token_has_write_access(hf_tok)
        except PermissionError as e:
            # Surface the permission failure as a user-visible Gradio error.
            raise gr.Error(str(e)) from e
        return
    if check_write_access(request, write_token):
        return
    raise gr.Error(
        "Sign in with Hugging Face to delete or rename runs. You need write access to this Space, "
        "or open the dashboard using a link that includes the write_token query parameter."
    )
268
+
269
+
270
def get_run_mutation_status(request: gr.Request) -> dict[str, Any]:
    """Report whether the caller may delete/rename runs and which auth path applies.

    Returns a dict with keys "spaces" (running on HF Spaces?), "allowed", and
    "auth" (one of "local", "oauth", "oauth_insufficient", "write_token", "none").
    """
    if os.getenv("SYSTEM") != "spaces":
        return {"spaces": False, "allowed": True, "auth": "local"}
    token = _hf_access_token(request)
    if token is not None:
        try:
            check_oauth_token_has_write_access(token)
        except PermissionError:
            return {"spaces": True, "allowed": False, "auth": "oauth_insufficient"}
        return {"spaces": True, "allowed": True, "auth": "oauth"}
    if check_write_access(request, write_token):
        return {"spaces": True, "allowed": True, "auth": "write_token"}
    return {"spaces": True, "allowed": False, "auth": "none"}
283
+
284
+
285
def upload_db_to_space(
    project: str, uploaded_db: gr.FileData, hf_token: str | None
) -> None:
    """Replace a project's database file with an uploaded one (write access required)."""
    check_hf_token_has_write_access(hf_token)
    destination = SQLiteStorage.get_project_db_path(project)
    os.makedirs(os.path.dirname(destination), exist_ok=True)
    shutil.copy(uploaded_db["path"], destination)
292
+
293
+
294
def bulk_upload_media(uploads: list[UploadEntry], hf_token: str | None) -> None:
    """Copy each uploaded media file into its project/run/step media location."""
    check_hf_token_has_write_access(hf_token)
    for entry in uploads:
        destination = get_project_media_path(
            project=entry["project"],
            run=entry["run"],
            step=entry["step"],
            relative_path=entry["relative_path"],
        )
        shutil.copy(entry["uploaded_file"]["path"], destination)
304
+
305
+
306
def log(
    project: str,
    run: str,
    metrics: dict[str, Any],
    step: int | None,
    hf_token: str | None,
) -> None:
    """Persist a single metrics payload for a run, after validating the HF token."""
    check_hf_token_has_write_access(hf_token)
    SQLiteStorage.log(project=project, run=run, metrics=metrics, step=step)
315
+
316
+
317
def bulk_log(
    logs: list[LogEntry],
    hf_token: str | None,
) -> None:
    """Group log entries by (project, run) and persist each group in one bulk write.

    The first entry in a group that carries a config supplies the run config;
    log_ids are forwarded only if at least one entry provides one.
    """
    check_hf_token_has_write_access(hf_token)

    grouped: dict[tuple, dict] = {}
    for entry in logs:
        bucket = grouped.setdefault(
            (entry["project"], entry["run"]),
            {"metrics": [], "steps": [], "log_ids": [], "config": None},
        )
        bucket["metrics"].append(entry["metrics"])
        bucket["steps"].append(entry.get("step"))
        bucket["log_ids"].append(entry.get("log_id"))
        if entry.get("config") and bucket["config"] is None:
            bucket["config"] = entry["config"]

    for (project, run), bucket in grouped.items():
        log_ids = bucket["log_ids"]
        SQLiteStorage.bulk_log(
            project=project,
            run=run,
            metrics_list=bucket["metrics"],
            steps=bucket["steps"],
            config=bucket["config"],
            log_ids=log_ids if any(i is not None for i in log_ids) else None,
        )
349
+
350
+
351
def bulk_log_system(
    logs: list[SystemLogEntry],
    hf_token: str | None,
) -> None:
    """Group system-metric entries by (project, run) and persist each group in bulk."""
    check_hf_token_has_write_access(hf_token)

    grouped: dict[tuple, dict] = {}
    for entry in logs:
        bucket = grouped.setdefault(
            (entry["project"], entry["run"]),
            {"metrics": [], "timestamps": [], "log_ids": []},
        )
        bucket["metrics"].append(entry["metrics"])
        bucket["timestamps"].append(entry.get("timestamp"))
        bucket["log_ids"].append(entry.get("log_id"))

    for (project, run), bucket in grouped.items():
        log_ids = bucket["log_ids"]
        SQLiteStorage.bulk_log_system(
            project=project,
            run=run,
            metrics_list=bucket["metrics"],
            timestamps=bucket["timestamps"],
            log_ids=log_ids if any(i is not None for i in log_ids) else None,
        )
375
+
376
+
377
def bulk_alert(
    alerts: list[AlertEntry],
    hf_token: str | None,
) -> None:
    """Group alert entries by (project, run) and persist each group in one bulk write."""
    check_hf_token_has_write_access(hf_token)

    grouped: dict[tuple, dict] = {}
    for entry in alerts:
        bucket = grouped.setdefault(
            (entry["project"], entry["run"]),
            {
                "titles": [],
                "texts": [],
                "levels": [],
                "steps": [],
                "timestamps": [],
                "alert_ids": [],
            },
        )
        bucket["titles"].append(entry["title"])
        bucket["texts"].append(entry.get("text"))
        bucket["levels"].append(entry["level"])
        bucket["steps"].append(entry.get("step"))
        bucket["timestamps"].append(entry.get("timestamp"))
        bucket["alert_ids"].append(entry.get("alert_id"))

    for (project, run), bucket in grouped.items():
        alert_ids = bucket["alert_ids"]
        SQLiteStorage.bulk_alert(
            project=project,
            run=run,
            titles=bucket["titles"],
            texts=bucket["texts"],
            levels=bucket["levels"],
            steps=bucket["steps"],
            timestamps=bucket["timestamps"],
            alert_ids=alert_ids if any(a is not None for a in alert_ids) else None,
        )
414
+
415
+
416
def get_alerts(
    project: str,
    run: str | None = None,
    level: str | None = None,
    since: str | None = None,
) -> list[dict]:
    """Fetch alerts for a project, optionally filtered by run, level, and start time."""
    return SQLiteStorage.get_alerts(project, run_name=run, level=level, since=since)
423
+
424
+
425
def get_metric_values(
    project: str,
    run: str,
    metric_name: str,
    step: int | None = None,
    around_step: int | None = None,
    at_time: str | None = None,
    window: int | None = None,
) -> list[dict]:
    """Fetch the values of one metric for a run.

    Optional selectors (exact step, neighborhood of a step, point in time, and
    window size) are passed straight through to the storage layer.
    """
    return SQLiteStorage.get_metric_values(
        project,
        run,
        metric_name,
        step=step,
        around_step=around_step,
        at_time=at_time,
        window=window,
    )
443
+
444
+
445
def get_runs_for_project(project: str) -> list[str]:
    """Return the names of all runs recorded for the given project."""
    return SQLiteStorage.get_runs(project)
447
+
448
+
449
def get_metrics_for_run(project: str, run: str) -> list[str]:
    """Return every metric name that has been logged for the given run."""
    return SQLiteStorage.get_all_metrics_for_run(project, run)
451
+
452
+
453
def filter_metrics_by_regex(metrics: list[str], filter_pattern: str) -> list[str]:
    """Filter metric names with a case-insensitive regex.

    A blank/whitespace pattern returns the list unchanged; an invalid regex
    falls back to a case-insensitive substring match.
    """
    if not filter_pattern.strip():
        return metrics
    try:
        compiled = re.compile(filter_pattern, re.IGNORECASE)
    except re.error:
        needle = filter_pattern.lower()
        return [name for name in metrics if needle in name.lower()]
    return [name for name in metrics if compiled.search(name)]
463
+
464
+
465
def get_all_projects() -> list[str]:
    """Return the names of every project known to the storage layer."""
    return SQLiteStorage.get_projects()
467
+
468
+
469
def get_project_summary(project: str) -> dict:
    """Summarize a project: its runs, run count, and most recent step seen."""
    runs = SQLiteStorage.get_runs(project)
    if not runs:
        return {"project": project, "num_runs": 0, "runs": [], "last_activity": None}
    last_steps = SQLiteStorage.get_max_steps_for_runs(project)
    last_activity = max(last_steps.values()) if last_steps else None
    return {
        "project": project,
        "num_runs": len(runs),
        "runs": runs,
        "last_activity": last_activity,
    }
482
+
483
+
484
def get_run_summary(project: str, run: str) -> dict:
    """Summarize a single run: log count, metric names, config, and last step.

    A run with no logs gets empty metrics, no config, and no last step, and
    skips the extra storage queries entirely.
    """
    num_logs = SQLiteStorage.get_log_count(project, run)
    summary = {
        "project": project,
        "run": run,
        "num_logs": num_logs,
        "metrics": [],
        "config": None,
        "last_step": None,
    }
    if num_logs:
        summary["metrics"] = SQLiteStorage.get_all_metrics_for_run(project, run)
        summary["config"] = SQLiteStorage.get_run_config(project, run)
        summary["last_step"] = SQLiteStorage.get_last_step(project, run)
    return summary
508
+
509
+
510
def get_system_metrics_for_run(project: str, run: str) -> list[str]:
    """Return every system-metric name logged for the given run."""
    return SQLiteStorage.get_all_system_metrics_for_run(project, run)
512
+
513
+
514
def get_system_logs(project: str, run: str) -> list[dict]:
    """Return the system-metric log rows for the given run."""
    return SQLiteStorage.get_system_logs(project, run)
516
+
517
+
518
def get_snapshot(
    project: str,
    run: str,
    step: int | None = None,
    around_step: int | None = None,
    at_time: str | None = None,
    window: int | None = None,
) -> dict:
    """Fetch a snapshot of a run's state, optionally anchored to a step or time."""
    return SQLiteStorage.get_snapshot(
        project, run, step=step, around_step=around_step, at_time=at_time, window=window
    )
529
+
530
+
531
def get_logs(project: str, run: str) -> list[dict]:
    """Return the run's logs, downsampled to at most 1500 points."""
    return SQLiteStorage.get_logs(project, run, max_points=1500)
533
+
534
+
535
def get_settings() -> dict:
    """Assemble dashboard display settings from utils helpers and env vars.

    TRACKIO_PLOT_ORDER is a comma-separated list (blank items dropped);
    TRACKIO_TABLE_TRUNCATE_LENGTH defaults to 250 characters.
    """
    raw_order = os.environ.get("TRACKIO_PLOT_ORDER", "")
    plot_order = [part.strip() for part in raw_order.split(",") if part.strip()]
    truncate_length = int(os.environ.get("TRACKIO_TABLE_TRUNCATE_LENGTH", "250"))
    return {
        "logo_urls": utils.get_logo_urls(),
        "color_palette": utils.get_color_palette(),
        "plot_order": plot_order,
        "table_truncate_length": truncate_length,
    }
548
+
549
+
550
def get_project_files(project: str) -> list[dict]:
    """List every file under the project's media "files" directory.

    Each entry has "name" (path relative to the files dir) and "path"
    (absolute path). Returns [] when the directory does not exist.
    """
    files_dir = utils.MEDIA_DIR / project / "files"
    if not files_dir.exists():
        return []
    return [
        {"name": str(path.relative_to(files_dir)), "path": str(path)}
        for path in sorted(files_dir.rglob("*"))
        if path.is_file()
    ]
565
+
566
+
567
def delete_run(request: gr.Request, project: str, run: str) -> bool:
    """Delete a run after verifying mutation rights; True if the run existed."""
    assert_can_mutate_runs(request)
    return SQLiteStorage.delete_run(project, run)
570
+
571
+
572
def rename_run(
    request: gr.Request,
    project: str,
    old_name: str,
    new_name: str,
) -> bool:
    """Rename a run after verifying mutation rights; returns True on success."""
    assert_can_mutate_runs(request)
    SQLiteStorage.rename_run(project, old_name, new_name)
    return True
581
+
582
+
583
def force_sync() -> bool:
    """Immediately export metrics to Parquet and push a commit via the scheduler.

    Returns True once the triggered commit has completed.
    """
    # export_to_parquet() early-returns until this flag is set, so force it
    # here to make the export run even before the first dataset import.
    SQLiteStorage._dataset_import_attempted = True
    SQLiteStorage.export_to_parquet()
    scheduler = SQLiteStorage.get_scheduler()
    # trigger() returns a future; block until the commit has been made.
    scheduler.trigger().result()
    return True
589
+
590
+
591
+ CSS = ""
592
+ HEAD = ""
593
+
594
+ gr.set_static_paths(paths=[utils.MEDIA_DIR])
595
+
596
+
597
def make_trackio_server() -> TrackioServer:
    """Build the Trackio server: OAuth routes, API endpoints, and write token."""
    server = TrackioServer(title="Trackio Dashboard")
    server.add_api_route(OAUTH_START_PATH, oauth_hf_start, methods=["GET"])
    server.add_api_route(OAUTH_CALLBACK_PATH, oauth_hf_callback, methods=["GET"])
    server.add_api_route("/oauth/logout", oauth_logout, methods=["GET"])
    # Every API endpoint is registered under its own function name.
    for endpoint in (
        get_run_mutation_status,
        upload_db_to_space,
        bulk_upload_media,
        log,
        bulk_log,
        bulk_log_system,
        bulk_alert,
        get_alerts,
        get_metric_values,
        get_runs_for_project,
        get_metrics_for_run,
        get_all_projects,
        get_project_summary,
        get_run_summary,
        get_system_metrics_for_run,
        get_system_logs,
        get_snapshot,
        get_logs,
        get_settings,
        get_project_files,
        delete_run,
        rename_run,
        force_sync,
    ):
        server.api(fn=endpoint, name=endpoint.__name__)
    server.write_token = write_token
    return server
trackio/sqlite_storage.py ADDED
@@ -0,0 +1,1821 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import sqlite3
4
+ import time
5
+ from datetime import datetime, timezone
6
+ from pathlib import Path
7
+ from threading import Lock
8
+
9
+ try:
10
+ import fcntl
11
+ except ImportError:
12
+ fcntl = None
13
+
14
+ try:
15
+ import msvcrt as _msvcrt
16
+ except ImportError:
17
+ _msvcrt = None
18
+
19
+ import huggingface_hub as hf
20
+ import orjson
21
+ import pandas as pd
22
+
23
+ from trackio.commit_scheduler import CommitScheduler
24
+ from trackio.dummy_commit_scheduler import DummyCommitScheduler
25
+ from trackio.utils import (
26
+ MEDIA_DIR,
27
+ TRACKIO_DIR,
28
+ deserialize_values,
29
+ serialize_values,
30
+ )
31
+
32
+ DB_EXT = ".db"
33
+
34
+
35
class ProcessLock:
    """A file-based lock that works across processes using fcntl (Unix) or msvcrt (Windows).

    On platforms with neither primitive available, the lock is a no-op.
    Acquisition is retried non-blockingly for up to ~10 seconds.

    Fix: if acquisition ultimately fails, the opened lock file is now closed
    before raising — previously the descriptor leaked because __exit__ never
    runs when __enter__ raises.
    """

    def __init__(self, lockfile_path: Path):
        # Path of the lock file; created (with parents) on first acquisition.
        self.lockfile_path = lockfile_path
        # Open file handle while the lock is held, else None.
        self.lockfile = None

    def __enter__(self):
        if fcntl is None and _msvcrt is None:
            # No cross-process locking primitive: behave as a no-op lock.
            return self
        self.lockfile_path.parent.mkdir(parents=True, exist_ok=True)
        self.lockfile = open(self.lockfile_path, "w")

        max_retries = 100
        for attempt in range(max_retries):
            try:
                if fcntl is not None:
                    fcntl.flock(self.lockfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
                else:
                    _msvcrt.locking(self.lockfile.fileno(), _msvcrt.LK_NBLCK, 1)
                return self
            except (IOError, OSError):
                if attempt < max_retries - 1:
                    time.sleep(0.1)
                else:
                    # Close before raising so a failed acquisition does not
                    # leak an open file descriptor.
                    self.lockfile.close()
                    self.lockfile = None
                    raise IOError("Could not acquire database lock after 10 seconds")

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.lockfile:
            try:
                if fcntl is not None:
                    fcntl.flock(self.lockfile.fileno(), fcntl.LOCK_UN)
                elif _msvcrt is not None:
                    _msvcrt.locking(self.lockfile.fileno(), _msvcrt.LK_UNLCK, 1)
            except (IOError, OSError):
                # Best-effort unlock; closing the file releases the lock anyway.
                pass
            self.lockfile.close()
72
+
73
+
74
class SQLiteStorage:
    """SQLite-backed storage for metrics, configs, system metrics, and alerts."""

    # True once an import from the remote dataset has been attempted;
    # export_to_parquet() early-returns until this is set.
    _dataset_import_attempted = False
    # Commit scheduler instance shared across calls — presumably created
    # lazily by get_scheduler(); confirm against that method.
    _current_scheduler: CommitScheduler | DummyCommitScheduler | None = None
    # Guards concurrent access to _current_scheduler.
    _scheduler_lock = Lock()
78
+
79
+ @staticmethod
80
+ def _get_connection(db_path: Path) -> sqlite3.Connection:
81
+ conn = sqlite3.connect(str(db_path), timeout=30.0)
82
+ # Keep WAL for concurrency + performance on many small writes
83
+ conn.execute("PRAGMA journal_mode = WAL")
84
+ # ---- Minimal perf tweaks for many tiny transactions ----
85
+ # NORMAL = fsync at critical points only (safer than OFF, much faster than FULL)
86
+ conn.execute("PRAGMA synchronous = NORMAL")
87
+ # Keep temp data in memory to avoid disk hits during small writes
88
+ conn.execute("PRAGMA temp_store = MEMORY")
89
+ # Give SQLite a bit more room for cache (negative = KB, engine-managed)
90
+ conn.execute("PRAGMA cache_size = -20000")
91
+ # --------------------------------------------------------
92
+ conn.row_factory = sqlite3.Row
93
+ return conn
94
+
95
+ @staticmethod
96
+ def _get_process_lock(project: str) -> ProcessLock:
97
+ lockfile_path = TRACKIO_DIR / f"{project}.lock"
98
+ return ProcessLock(lockfile_path)
99
+
100
+ @staticmethod
101
+ def get_project_db_filename(project: str) -> str:
102
+ """Get the database filename for a specific project."""
103
+ safe_project_name = "".join(
104
+ c for c in project if c.isalnum() or c in ("-", "_")
105
+ ).rstrip()
106
+ if not safe_project_name:
107
+ safe_project_name = "default"
108
+ return f"{safe_project_name}{DB_EXT}"
109
+
110
+ @staticmethod
111
+ def get_project_db_path(project: str) -> Path:
112
+ """Get the database path for a specific project."""
113
+ filename = SQLiteStorage.get_project_db_filename(project)
114
+ return TRACKIO_DIR / filename
115
+
116
    @staticmethod
    def init_db(project: str) -> Path:
        """
        Initialize the SQLite database with required tables.
        Returns the database path.

        Idempotent: all DDL uses IF NOT EXISTS, and the column migrations at
        the end are attempted and silently skipped when already applied. A
        cross-process file lock serializes initialization.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        db_path.parent.mkdir(parents=True, exist_ok=True)
        with SQLiteStorage._get_process_lock(project):
            with sqlite3.connect(str(db_path), timeout=30.0) as conn:
                # Same pragma tuning as _get_connection (WAL, small-write friendly).
                conn.execute("PRAGMA journal_mode = WAL")
                conn.execute("PRAGMA synchronous = NORMAL")
                conn.execute("PRAGMA temp_store = MEMORY")
                conn.execute("PRAGMA cache_size = -20000")
                cursor = conn.cursor()
                # Core metric log: one row per logged payload; `metrics` is a
                # serialized (JSON) blob flattened later by export_to_parquet.
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS metrics (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        timestamp TEXT NOT NULL,
                        run_name TEXT NOT NULL,
                        step INTEGER NOT NULL,
                        metrics TEXT NOT NULL
                    )
                    """
                )
                # One serialized config blob per run (UNIQUE on run_name).
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS configs (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        run_name TEXT NOT NULL,
                        config TEXT NOT NULL,
                        created_at TEXT NOT NULL,
                        UNIQUE(run_name)
                    )
                    """
                )
                cursor.execute(
                    """
                    CREATE INDEX IF NOT EXISTS idx_metrics_run_step
                    ON metrics(run_name, step)
                    """
                )
                cursor.execute(
                    """
                    CREATE INDEX IF NOT EXISTS idx_configs_run_name
                    ON configs(run_name)
                    """
                )
                cursor.execute(
                    """
                    CREATE INDEX IF NOT EXISTS idx_metrics_run_timestamp
                    ON metrics(run_name, timestamp)
                    """
                )
                # System metrics are keyed by timestamp only (no step column).
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS system_metrics (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        timestamp TEXT NOT NULL,
                        run_name TEXT NOT NULL,
                        metrics TEXT NOT NULL
                    )
                    """
                )
                cursor.execute(
                    """
                    CREATE INDEX IF NOT EXISTS idx_system_metrics_run_timestamp
                    ON system_metrics(run_name, timestamp)
                    """
                )

                # Free-form key/value metadata for the project.
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS project_metadata (
                        key TEXT PRIMARY KEY,
                        value TEXT NOT NULL
                    )
                    """
                )

                # Media files queued for upload to a Space.
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS pending_uploads (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        space_id TEXT NOT NULL,
                        run_name TEXT,
                        step INTEGER,
                        file_path TEXT NOT NULL,
                        relative_path TEXT,
                        created_at TEXT NOT NULL
                    )
                    """
                )

                # User-raised alerts; alert_id enables idempotent bulk inserts.
                cursor.execute(
                    """
                    CREATE TABLE IF NOT EXISTS alerts (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        timestamp TEXT NOT NULL,
                        run_name TEXT NOT NULL,
                        title TEXT NOT NULL,
                        text TEXT,
                        level TEXT NOT NULL DEFAULT 'warn',
                        step INTEGER,
                        alert_id TEXT
                    )
                    """
                )
                cursor.execute(
                    """
                    CREATE INDEX IF NOT EXISTS idx_alerts_run
                    ON alerts(run_name)
                    """
                )
                cursor.execute(
                    """
                    CREATE INDEX IF NOT EXISTS idx_alerts_timestamp
                    ON alerts(timestamp)
                    """
                )
                # Partial unique index: NULL alert_ids may repeat freely.
                cursor.execute(
                    """
                    CREATE UNIQUE INDEX IF NOT EXISTS idx_alerts_alert_id
                    ON alerts(alert_id) WHERE alert_id IS NOT NULL
                    """
                )

                # Migration for pre-existing databases: add log_id/space_id
                # columns; ALTER TABLE raises OperationalError when the column
                # already exists, which is the expected no-op path.
                for table in ("metrics", "system_metrics"):
                    for col in ("log_id TEXT", "space_id TEXT"):
                        try:
                            cursor.execute(f"ALTER TABLE {table} ADD COLUMN {col}")
                        except sqlite3.OperationalError:
                            pass
                    cursor.execute(
                        f"""CREATE UNIQUE INDEX IF NOT EXISTS idx_{table}_log_id
                        ON {table}(log_id) WHERE log_id IS NOT NULL"""
                    )
                    cursor.execute(
                        f"""CREATE INDEX IF NOT EXISTS idx_{table}_pending
                        ON {table}(space_id) WHERE space_id IS NOT NULL"""
                    )

                conn.commit()
        return db_path
261
+
262
+ @staticmethod
263
+ def export_to_parquet():
264
+ """
265
+ Exports all projects' DB files as Parquet under the same path but with extension ".parquet".
266
+ Also exports system_metrics to separate parquet files with "_system.parquet" suffix.
267
+ Also exports configs to separate parquet files with "_configs.parquet" suffix.
268
+ """
269
+ if not SQLiteStorage._dataset_import_attempted:
270
+ return
271
+ if not TRACKIO_DIR.exists():
272
+ return
273
+
274
+ all_paths = os.listdir(TRACKIO_DIR)
275
+ db_names = [f for f in all_paths if f.endswith(DB_EXT)]
276
+ for db_name in db_names:
277
+ db_path = TRACKIO_DIR / db_name
278
+ parquet_path = db_path.with_suffix(".parquet")
279
+ system_parquet_path = db_path.with_suffix("") / ""
280
+ system_parquet_path = TRACKIO_DIR / (db_path.stem + "_system.parquet")
281
+ configs_parquet_path = TRACKIO_DIR / (db_path.stem + "_configs.parquet")
282
+ if (not parquet_path.exists()) or (
283
+ db_path.stat().st_mtime > parquet_path.stat().st_mtime
284
+ ):
285
+ with sqlite3.connect(str(db_path)) as conn:
286
+ df = pd.read_sql("SELECT * FROM metrics", conn)
287
+ if not df.empty:
288
+ metrics = df["metrics"].copy()
289
+ metrics = pd.DataFrame(
290
+ metrics.apply(
291
+ lambda x: deserialize_values(orjson.loads(x))
292
+ ).values.tolist(),
293
+ index=df.index,
294
+ )
295
+ del df["metrics"]
296
+ for col in metrics.columns:
297
+ df[col] = metrics[col]
298
+ df.to_parquet(
299
+ parquet_path,
300
+ write_page_index=True,
301
+ use_content_defined_chunking=True,
302
+ )
303
+
304
+ if (not system_parquet_path.exists()) or (
305
+ db_path.stat().st_mtime > system_parquet_path.stat().st_mtime
306
+ ):
307
+ with sqlite3.connect(str(db_path)) as conn:
308
+ try:
309
+ sys_df = pd.read_sql("SELECT * FROM system_metrics", conn)
310
+ except Exception:
311
+ sys_df = pd.DataFrame()
312
+ if not sys_df.empty:
313
+ sys_metrics = sys_df["metrics"].copy()
314
+ sys_metrics = pd.DataFrame(
315
+ sys_metrics.apply(
316
+ lambda x: deserialize_values(orjson.loads(x))
317
+ ).values.tolist(),
318
+ index=sys_df.index,
319
+ )
320
+ del sys_df["metrics"]
321
+ for col in sys_metrics.columns:
322
+ sys_df[col] = sys_metrics[col]
323
+ sys_df.to_parquet(
324
+ system_parquet_path,
325
+ write_page_index=True,
326
+ use_content_defined_chunking=True,
327
+ )
328
+
329
+ if (not configs_parquet_path.exists()) or (
330
+ db_path.stat().st_mtime > configs_parquet_path.stat().st_mtime
331
+ ):
332
+ with sqlite3.connect(str(db_path)) as conn:
333
+ try:
334
+ configs_df = pd.read_sql("SELECT * FROM configs", conn)
335
+ except Exception:
336
+ configs_df = pd.DataFrame()
337
+ if not configs_df.empty:
338
+ config_data = configs_df["config"].copy()
339
+ config_data = pd.DataFrame(
340
+ config_data.apply(
341
+ lambda x: deserialize_values(orjson.loads(x))
342
+ ).values.tolist(),
343
+ index=configs_df.index,
344
+ )
345
+ del configs_df["config"]
346
+ for col in config_data.columns:
347
+ configs_df[col] = config_data[col]
348
+ configs_df.to_parquet(
349
+ configs_parquet_path,
350
+ write_page_index=True,
351
+ use_content_defined_chunking=True,
352
+ )
353
+
354
+ @staticmethod
355
+ def _cleanup_wal_sidecars(db_path: Path) -> None:
356
+ """Remove leftover -wal/-shm files for a DB basename (prevents disk I/O errors)."""
357
+ for suffix in ("-wal", "-shm"):
358
+ sidecar = Path(str(db_path) + suffix)
359
+ try:
360
+ if sidecar.exists():
361
+ sidecar.unlink()
362
+ except Exception:
363
+ pass
364
+
365
    @staticmethod
    def import_from_parquet():
        """
        Imports to all DB files that have matching files under the same path but with extension ".parquet".
        Also imports system_metrics from "_system.parquet" files.
        Also imports configs from "_configs.parquet" files.
        """
        if not TRACKIO_DIR.exists():
            return

        all_paths = os.listdir(TRACKIO_DIR)
        # Main metrics files are "<project>.parquet"; the "_system"/"_configs"
        # sidecar files are handled by the dedicated loops below.
        parquet_names = [
            f
            for f in all_paths
            if f.endswith(".parquet")
            and not f.endswith("_system.parquet")
            and not f.endswith("_configs.parquet")
        ]
        imported_projects = {Path(name).stem for name in parquet_names}
        for pq_name in parquet_names:
            parquet_path = TRACKIO_DIR / pq_name
            db_path = parquet_path.with_suffix(DB_EXT)

            # Stale -wal/-shm files from a previous process would otherwise
            # cause "disk I/O error" when the DB is rewritten below.
            SQLiteStorage._cleanup_wal_sidecars(db_path)

            df = pd.read_parquet(parquet_path)
            if "metrics" not in df.columns:
                # Wide-format export (one column per metric): re-pack all
                # non-structural columns into the single JSON "metrics" blob
                # that the SQLite schema expects.
                metrics = df.copy()
                structural_cols = [
                    "id",
                    "timestamp",
                    "run_name",
                    "step",
                    "log_id",
                    "space_id",
                ]
                df = df[[c for c in structural_cols if c in df.columns]]
                for col in structural_cols:
                    if col in metrics.columns:
                        del metrics[col]
                # Round-trip through JSON records to shed pandas dtypes before
                # re-serializing each row with the project's value encoding.
                metrics = orjson.loads(metrics.to_json(orient="records"))
                df["metrics"] = [orjson.dumps(serialize_values(row)) for row in metrics]

            with sqlite3.connect(str(db_path), timeout=30.0) as conn:
                # if_exists="replace": the parquet snapshot is the source of truth.
                df.to_sql("metrics", conn, if_exists="replace", index=False)
                conn.commit()

        # System-metrics sidecars: "<project>_system.parquet".
        system_parquet_names = [f for f in all_paths if f.endswith("_system.parquet")]
        for pq_name in system_parquet_names:
            parquet_path = TRACKIO_DIR / pq_name
            db_name = pq_name.replace("_system.parquet", DB_EXT)
            db_path = TRACKIO_DIR / db_name
            project_name = db_path.stem
            # Skip orphaned sidecars: only import for projects whose main
            # parquet was imported above or that already have a local DB.
            if project_name not in imported_projects and not db_path.exists():
                continue

            df = pd.read_parquet(parquet_path)
            if "metrics" not in df.columns:
                # Same wide-to-blob repacking as above, without step/log columns.
                metrics = df.copy()
                other_cols = ["id", "timestamp", "run_name"]
                df = df[[c for c in other_cols if c in df.columns]]
                for col in other_cols:
                    if col in metrics.columns:
                        del metrics[col]
                metrics = orjson.loads(metrics.to_json(orient="records"))
                df["metrics"] = [orjson.dumps(serialize_values(row)) for row in metrics]

            with sqlite3.connect(str(db_path), timeout=30.0) as conn:
                df.to_sql("system_metrics", conn, if_exists="replace", index=False)
                conn.commit()

        # Run-config sidecars: "<project>_configs.parquet".
        configs_parquet_names = [f for f in all_paths if f.endswith("_configs.parquet")]
        for pq_name in configs_parquet_names:
            parquet_path = TRACKIO_DIR / pq_name
            db_name = pq_name.replace("_configs.parquet", DB_EXT)
            db_path = TRACKIO_DIR / db_name
            project_name = db_path.stem
            if project_name not in imported_projects and not db_path.exists():
                continue

            df = pd.read_parquet(parquet_path)
            if "config" not in df.columns:
                # Wide-format configs: fold per-key columns back into the
                # single JSON "config" blob.
                config_data = df.copy()
                other_cols = ["id", "run_name", "created_at"]
                df = df[[c for c in other_cols if c in df.columns]]
                for col in other_cols:
                    if col in config_data.columns:
                        del config_data[col]
                config_data = orjson.loads(config_data.to_json(orient="records"))
                df["config"] = [
                    orjson.dumps(serialize_values(row)) for row in config_data
                ]

            with sqlite3.connect(str(db_path), timeout=30.0) as conn:
                df.to_sql("configs", conn, if_exists="replace", index=False)
                conn.commit()
461
+
462
    @staticmethod
    def get_scheduler():
        """
        Get the scheduler for the database based on the environment variables.
        This applies to both local and Spaces.

        Returns a cached, process-wide scheduler: a huggingface_hub
        CommitScheduler that periodically pushes TRACKIO_DIR parquet/media
        files to the backing dataset when both TRACKIO_DATASET_ID and
        SPACE_REPO_NAME are set, otherwise a no-op DummyCommitScheduler.
        """
        with SQLiteStorage._scheduler_lock:
            # Singleton: create once under the lock, reuse afterwards.
            if SQLiteStorage._current_scheduler is not None:
                return SQLiteStorage._current_scheduler
            hf_token = os.environ.get("HF_TOKEN")
            dataset_id = os.environ.get("TRACKIO_DATASET_ID")
            space_repo_name = os.environ.get("SPACE_REPO_NAME")
            # Outside a Space (or without a dataset to sync to) there is
            # nothing to commit, so use the do-nothing scheduler.
            if dataset_id is None or space_repo_name is None:
                scheduler = DummyCommitScheduler()
            else:
                scheduler = CommitScheduler(
                    repo_id=dataset_id,
                    repo_type="dataset",
                    folder_path=TRACKIO_DIR,
                    private=True,
                    # Only sync the parquet snapshots and media assets — the
                    # live SQLite files (and WAL sidecars) stay local.
                    allow_patterns=[
                        "*.parquet",
                        "*_system.parquet",
                        "*_configs.parquet",
                        "media/**/*",
                    ],
                    squash_history=True,  # keep the dataset repo small
                    token=hf_token,
                    # Snapshot SQLite -> parquet right before each commit.
                    on_before_commit=SQLiteStorage.export_to_parquet,
                )
            SQLiteStorage._current_scheduler = scheduler
            return scheduler
494
+
495
    @staticmethod
    def log(project: str, run: str, metrics: dict, step: int | None = None):
        """
        Safely log metrics to the database. Before logging, this method will ensure the database exists
        and is set up with the correct tables. It also uses a cross-process lock to prevent
        database locking errors when multiple processes access the same database.

        This method is not used in the latest versions of Trackio (replaced by bulk_log) but
        is kept for backwards compatibility for users who are connecting to a newer version of
        a Trackio Spaces dashboard with an older version of Trackio installed locally.

        Args:
            project: Project (database) name.
            run: Run name the row belongs to.
            metrics: Metrics dict, serialized to a JSON blob for storage.
            step: Explicit step number; when None, continues from the run's
                current MAX(step) (or 0 for a brand-new run).
        """
        db_path = SQLiteStorage.init_db(project)
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                cursor = conn.cursor()
                # Look up the run's latest step so an omitted step auto-increments.
                cursor.execute(
                    """
                    SELECT MAX(step)
                    FROM metrics
                    WHERE run_name = ?
                    """,
                    (run,),
                )
                last_step = cursor.fetchone()[0]
                # Priority: explicit step > last_step + 1 > 0 (empty run).
                current_step = (
                    0
                    if step is None and last_step is None
                    else (step if step is not None else last_step + 1)
                )
                current_timestamp = datetime.now(timezone.utc).isoformat()
                cursor.execute(
                    """
                    INSERT INTO metrics
                    (timestamp, run_name, step, metrics)
                    VALUES (?, ?, ?, ?)
                    """,
                    (
                        current_timestamp,
                        run,
                        current_step,
                        orjson.dumps(serialize_values(metrics)),
                    ),
                )
                conn.commit()
539
+
540
    @staticmethod
    def bulk_log(
        project: str,
        run: str,
        metrics_list: list[dict],
        steps: list[int] | None = None,
        timestamps: list[str] | None = None,
        config: dict | None = None,
        log_ids: list[str] | None = None,
        space_id: str | None = None,
    ):
        """
        Safely log bulk metrics to the database. Before logging, this method will ensure the database exists
        and is set up with the correct tables. It also uses a cross-process lock to prevent
        database locking errors when multiple processes access the same database.

        Args:
            project: Project (database) name.
            run: Run name the rows belong to.
            metrics_list: One metrics dict per row to insert.
            steps: Optional per-row steps; None entries are auto-filled past
                the run's current MAX(step). If omitted entirely, rows are
                numbered 0..n-1.
            timestamps: Optional per-row ISO timestamps; defaults to now (UTC).
            config: Optional run config, upserted into the configs table.
            log_ids: Optional per-row ids passed to INSERT OR IGNORE —
                presumably de-duplicating resubmitted batches via a unique
                index on log_id (schema defined in init_db; confirm there).
            space_id: Optional id of the HF Space that produced the rows.

        Raises:
            ValueError: If metrics_list, steps, and timestamps differ in length.
        """
        if not metrics_list:
            return

        if timestamps is None:
            timestamps = [datetime.now(timezone.utc).isoformat()] * len(metrics_list)

        db_path = SQLiteStorage.init_db(project)
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                cursor = conn.cursor()

                if steps is None:
                    # No steps supplied at all: number the batch 0..n-1.
                    steps = list(range(len(metrics_list)))
                elif any(s is None for s in steps):
                    # Fill only the missing entries, continuing from the last
                    # step already stored for this run.
                    cursor.execute(
                        "SELECT MAX(step) FROM metrics WHERE run_name = ?", (run,)
                    )
                    last_step = cursor.fetchone()[0]
                    current_step = 0 if last_step is None else last_step + 1
                    processed_steps = []
                    for step in steps:
                        if step is None:
                            processed_steps.append(current_step)
                            current_step += 1
                        else:
                            processed_steps.append(step)
                    steps = processed_steps

                if len(metrics_list) != len(steps) or len(metrics_list) != len(
                    timestamps
                ):
                    raise ValueError(
                        "metrics_list, steps, and timestamps must have the same length"
                    )

                data = []
                for i, metrics in enumerate(metrics_list):
                    lid = log_ids[i] if log_ids else None
                    data.append(
                        (
                            timestamps[i],
                            run,
                            steps[i],
                            orjson.dumps(serialize_values(metrics)),
                            lid,
                            space_id,
                        )
                    )

                # OR IGNORE makes retried batches idempotent (duplicate rows
                # are silently skipped rather than raising).
                cursor.executemany(
                    """
                    INSERT OR IGNORE INTO metrics
                    (timestamp, run_name, step, metrics, log_id, space_id)
                    VALUES (?, ?, ?, ?, ?, ?)
                    """,
                    data,
                )

                if config:
                    # Upsert the run's config (one row per run).
                    current_timestamp = datetime.now(timezone.utc).isoformat()
                    cursor.execute(
                        """
                        INSERT OR REPLACE INTO configs
                        (run_name, config, created_at)
                        VALUES (?, ?, ?)
                        """,
                        (
                            run,
                            orjson.dumps(serialize_values(config)),
                            current_timestamp,
                        ),
                    )

                conn.commit()
630
+
631
+ @staticmethod
632
+ def bulk_log_system(
633
+ project: str,
634
+ run: str,
635
+ metrics_list: list[dict],
636
+ timestamps: list[str] | None = None,
637
+ log_ids: list[str] | None = None,
638
+ space_id: str | None = None,
639
+ ):
640
+ """
641
+ Log system metrics (GPU, etc.) to the database without step numbers.
642
+ These metrics use timestamps for the x-axis instead of steps.
643
+ """
644
+ if not metrics_list:
645
+ return
646
+
647
+ if timestamps is None:
648
+ timestamps = [datetime.now(timezone.utc).isoformat()] * len(metrics_list)
649
+
650
+ if len(metrics_list) != len(timestamps):
651
+ raise ValueError("metrics_list and timestamps must have the same length")
652
+
653
+ db_path = SQLiteStorage.init_db(project)
654
+ with SQLiteStorage._get_process_lock(project):
655
+ with SQLiteStorage._get_connection(db_path) as conn:
656
+ cursor = conn.cursor()
657
+ data = []
658
+ for i, metrics in enumerate(metrics_list):
659
+ lid = log_ids[i] if log_ids else None
660
+ data.append(
661
+ (
662
+ timestamps[i],
663
+ run,
664
+ orjson.dumps(serialize_values(metrics)),
665
+ lid,
666
+ space_id,
667
+ )
668
+ )
669
+
670
+ cursor.executemany(
671
+ """
672
+ INSERT OR IGNORE INTO system_metrics
673
+ (timestamp, run_name, metrics, log_id, space_id)
674
+ VALUES (?, ?, ?, ?, ?)
675
+ """,
676
+ data,
677
+ )
678
+ conn.commit()
679
+
680
+ @staticmethod
681
+ def bulk_alert(
682
+ project: str,
683
+ run: str,
684
+ titles: list[str],
685
+ texts: list[str | None],
686
+ levels: list[str],
687
+ steps: list[int | None],
688
+ timestamps: list[str] | None = None,
689
+ alert_ids: list[str] | None = None,
690
+ ):
691
+ if not titles:
692
+ return
693
+
694
+ if timestamps is None:
695
+ timestamps = [datetime.now(timezone.utc).isoformat()] * len(titles)
696
+
697
+ db_path = SQLiteStorage.init_db(project)
698
+ with SQLiteStorage._get_process_lock(project):
699
+ with SQLiteStorage._get_connection(db_path) as conn:
700
+ cursor = conn.cursor()
701
+ data = []
702
+ for i in range(len(titles)):
703
+ aid = alert_ids[i] if alert_ids else None
704
+ data.append(
705
+ (
706
+ timestamps[i],
707
+ run,
708
+ titles[i],
709
+ texts[i],
710
+ levels[i],
711
+ steps[i],
712
+ aid,
713
+ )
714
+ )
715
+
716
+ cursor.executemany(
717
+ """
718
+ INSERT OR IGNORE INTO alerts
719
+ (timestamp, run_name, title, text, level, step, alert_id)
720
+ VALUES (?, ?, ?, ?, ?, ?, ?)
721
+ """,
722
+ data,
723
+ )
724
+ conn.commit()
725
+
726
+ @staticmethod
727
+ def get_alerts(
728
+ project: str,
729
+ run_name: str | None = None,
730
+ level: str | None = None,
731
+ since: str | None = None,
732
+ ) -> list[dict]:
733
+ db_path = SQLiteStorage.get_project_db_path(project)
734
+ if not db_path.exists():
735
+ return []
736
+
737
+ with SQLiteStorage._get_connection(db_path) as conn:
738
+ cursor = conn.cursor()
739
+ try:
740
+ query = (
741
+ "SELECT timestamp, run_name, title, text, level, step FROM alerts"
742
+ )
743
+ conditions = []
744
+ params = []
745
+ if run_name is not None:
746
+ conditions.append("run_name = ?")
747
+ params.append(run_name)
748
+ if level is not None:
749
+ conditions.append("level = ?")
750
+ params.append(level)
751
+ if since is not None:
752
+ conditions.append("timestamp > ?")
753
+ params.append(since)
754
+ if conditions:
755
+ query += " WHERE " + " AND ".join(conditions)
756
+ query += " ORDER BY timestamp DESC"
757
+ cursor.execute(query, params)
758
+
759
+ rows = cursor.fetchall()
760
+ return [
761
+ {
762
+ "timestamp": row["timestamp"],
763
+ "run": row["run_name"],
764
+ "title": row["title"],
765
+ "text": row["text"],
766
+ "level": row["level"],
767
+ "step": row["step"],
768
+ }
769
+ for row in rows
770
+ ]
771
+ except sqlite3.OperationalError as e:
772
+ if "no such table: alerts" in str(e):
773
+ return []
774
+ raise
775
+
776
+ @staticmethod
777
+ def get_alert_count(project: str) -> int:
778
+ db_path = SQLiteStorage.get_project_db_path(project)
779
+ if not db_path.exists():
780
+ return 0
781
+
782
+ with SQLiteStorage._get_connection(db_path) as conn:
783
+ cursor = conn.cursor()
784
+ try:
785
+ cursor.execute("SELECT COUNT(*) FROM alerts")
786
+ return cursor.fetchone()[0]
787
+ except sqlite3.OperationalError:
788
+ return 0
789
+
790
+ @staticmethod
791
+ def get_system_logs(project: str, run: str) -> list[dict]:
792
+ """Retrieve system metrics for a specific run. Returns metrics with timestamps (no steps)."""
793
+ db_path = SQLiteStorage.get_project_db_path(project)
794
+ if not db_path.exists():
795
+ return []
796
+
797
+ with SQLiteStorage._get_connection(db_path) as conn:
798
+ cursor = conn.cursor()
799
+ try:
800
+ cursor.execute(
801
+ """
802
+ SELECT timestamp, metrics
803
+ FROM system_metrics
804
+ WHERE run_name = ?
805
+ ORDER BY timestamp
806
+ """,
807
+ (run,),
808
+ )
809
+
810
+ rows = cursor.fetchall()
811
+ results = []
812
+ for row in rows:
813
+ metrics = orjson.loads(row["metrics"])
814
+ metrics = deserialize_values(metrics)
815
+ metrics["timestamp"] = row["timestamp"]
816
+ results.append(metrics)
817
+ return results
818
+ except sqlite3.OperationalError as e:
819
+ if "no such table: system_metrics" in str(e):
820
+ return []
821
+ raise
822
+
823
+ @staticmethod
824
+ def get_all_system_metrics_for_run(project: str, run: str) -> list[str]:
825
+ """Get all system metric names for a specific project/run."""
826
+ return SQLiteStorage._get_metric_names(
827
+ project, run, "system_metrics", exclude_keys={"timestamp"}
828
+ )
829
+
830
+ @staticmethod
831
+ def has_system_metrics(project: str) -> bool:
832
+ """Check if a project has any system metrics logged."""
833
+ db_path = SQLiteStorage.get_project_db_path(project)
834
+ if not db_path.exists():
835
+ return False
836
+
837
+ with SQLiteStorage._get_connection(db_path) as conn:
838
+ cursor = conn.cursor()
839
+ try:
840
+ cursor.execute("SELECT COUNT(*) FROM system_metrics LIMIT 1")
841
+ count = cursor.fetchone()[0]
842
+ return count > 0
843
+ except sqlite3.OperationalError:
844
+ return False
845
+
846
+ @staticmethod
847
+ def get_log_count(project: str, run: str) -> int:
848
+ db_path = SQLiteStorage.get_project_db_path(project)
849
+ if not db_path.exists():
850
+ return 0
851
+ try:
852
+ with SQLiteStorage._get_connection(db_path) as conn:
853
+ cursor = conn.cursor()
854
+ cursor.execute(
855
+ "SELECT COUNT(*) FROM metrics WHERE run_name = ?",
856
+ (run,),
857
+ )
858
+ return cursor.fetchone()[0]
859
+ except sqlite3.OperationalError as e:
860
+ if "no such table: metrics" in str(e):
861
+ return 0
862
+ raise
863
+
864
+ @staticmethod
865
+ def get_last_step(project: str, run: str) -> int | None:
866
+ db_path = SQLiteStorage.get_project_db_path(project)
867
+ if not db_path.exists():
868
+ return None
869
+ try:
870
+ with SQLiteStorage._get_connection(db_path) as conn:
871
+ cursor = conn.cursor()
872
+ cursor.execute(
873
+ "SELECT MAX(step) FROM metrics WHERE run_name = ?",
874
+ (run,),
875
+ )
876
+ row = cursor.fetchone()
877
+ return row[0] if row and row[0] is not None else None
878
+ except sqlite3.OperationalError as e:
879
+ if "no such table: metrics" in str(e):
880
+ return None
881
+ raise
882
+
883
+ @staticmethod
884
+ def get_logs(project: str, run: str, max_points: int | None = None) -> list[dict]:
885
+ """Retrieve logs for a specific run. Logs include the step count (int) and the timestamp (datetime object)."""
886
+ db_path = SQLiteStorage.get_project_db_path(project)
887
+ if not db_path.exists():
888
+ return []
889
+
890
+ try:
891
+ with SQLiteStorage._get_connection(db_path) as conn:
892
+ cursor = conn.cursor()
893
+ cursor.execute(
894
+ """
895
+ SELECT timestamp, step, metrics
896
+ FROM metrics
897
+ WHERE run_name = ?
898
+ ORDER BY timestamp
899
+ """,
900
+ (run,),
901
+ )
902
+
903
+ rows = cursor.fetchall()
904
+ if max_points is not None and len(rows) > max_points:
905
+ step = len(rows) / max_points
906
+ indices = {int(i * step) for i in range(max_points)}
907
+ indices.add(len(rows) - 1)
908
+ rows = [rows[i] for i in sorted(indices)]
909
+
910
+ results = []
911
+ for row in rows:
912
+ metrics = orjson.loads(row["metrics"])
913
+ metrics = deserialize_values(metrics)
914
+ metrics["timestamp"] = row["timestamp"]
915
+ metrics["step"] = row["step"]
916
+ results.append(metrics)
917
+ return results
918
+ except sqlite3.OperationalError as e:
919
+ if "no such table: metrics" in str(e):
920
+ return []
921
+ raise
922
+
923
    @staticmethod
    def load_from_dataset():
        """
        Download backed-up data from the HF dataset into TRACKIO_DIR.

        No-op unless both TRACKIO_DATASET_ID and SPACE_REPO_NAME are set
        (i.e. running on a Space with a backing dataset). Fetches parquet
        files and media assets missing locally, then rebuilds the SQLite DBs
        from the parquet snapshots. The "attempted" flag is set either way so
        callers (e.g. get_projects) trigger this at most once per process.
        """
        dataset_id = os.environ.get("TRACKIO_DATASET_ID")
        space_repo_name = os.environ.get("SPACE_REPO_NAME")
        if dataset_id is not None and space_repo_name is not None:
            hfapi = hf.HfApi()
            updated = False
            if not TRACKIO_DIR.exists():
                TRACKIO_DIR.mkdir(parents=True, exist_ok=True)
            # Hold the commit-scheduler lock so a concurrent push does not
            # observe a half-downloaded folder.
            with SQLiteStorage.get_scheduler().lock:
                try:
                    files = hfapi.list_repo_files(dataset_id, repo_type="dataset")
                    for file in files:
                        # Download parquet and media assets
                        if not (file.endswith(".parquet") or file.startswith("media/")):
                            continue
                        # Skip files already present locally.
                        if (TRACKIO_DIR / file).exists():
                            continue
                        hf.hf_hub_download(
                            dataset_id, file, repo_type="dataset", local_dir=TRACKIO_DIR
                        )
                        updated = True
                # Missing repo/entries are fine: nothing has been backed up yet.
                except hf.errors.EntryNotFoundError:
                    pass
                except hf.errors.RepositoryNotFoundError:
                    pass
            if updated:
                # Rebuild SQLite databases from the freshly downloaded parquet.
                SQLiteStorage.import_from_parquet()
        SQLiteStorage._dataset_import_attempted = True
952
+
953
+ @staticmethod
954
+ def get_projects() -> list[str]:
955
+ """
956
+ Get list of all projects by scanning the database files in the trackio directory.
957
+ """
958
+ if not SQLiteStorage._dataset_import_attempted:
959
+ SQLiteStorage.load_from_dataset()
960
+
961
+ projects: set[str] = set()
962
+ if not TRACKIO_DIR.exists():
963
+ return []
964
+
965
+ for db_file in TRACKIO_DIR.glob(f"*{DB_EXT}"):
966
+ project_name = db_file.stem
967
+ projects.add(project_name)
968
+ return sorted(projects)
969
+
970
+ @staticmethod
971
+ def get_runs(project: str) -> list[str]:
972
+ """Get list of all runs for a project, ordered by creation time."""
973
+ db_path = SQLiteStorage.get_project_db_path(project)
974
+ if not db_path.exists():
975
+ return []
976
+
977
+ try:
978
+ with SQLiteStorage._get_connection(db_path) as conn:
979
+ cursor = conn.cursor()
980
+ cursor.execute(
981
+ """
982
+ SELECT run_name
983
+ FROM metrics
984
+ GROUP BY run_name
985
+ ORDER BY MIN(timestamp) ASC
986
+ """,
987
+ )
988
+ return [row[0] for row in cursor.fetchall()]
989
+ except sqlite3.OperationalError as e:
990
+ if "no such table: metrics" in str(e):
991
+ return []
992
+ raise
993
+
994
+ @staticmethod
995
+ def get_max_steps_for_runs(project: str) -> dict[str, int]:
996
+ """Get the maximum step for each run in a project."""
997
+ db_path = SQLiteStorage.get_project_db_path(project)
998
+ if not db_path.exists():
999
+ return {}
1000
+
1001
+ try:
1002
+ with SQLiteStorage._get_connection(db_path) as conn:
1003
+ cursor = conn.cursor()
1004
+ cursor.execute(
1005
+ """
1006
+ SELECT run_name, MAX(step) as max_step
1007
+ FROM metrics
1008
+ GROUP BY run_name
1009
+ """
1010
+ )
1011
+
1012
+ results = {}
1013
+ for row in cursor.fetchall():
1014
+ results[row["run_name"]] = row["max_step"]
1015
+
1016
+ return results
1017
+ except sqlite3.OperationalError as e:
1018
+ if "no such table: metrics" in str(e):
1019
+ return {}
1020
+ raise
1021
+
1022
+ @staticmethod
1023
+ def get_max_step_for_run(project: str, run: str) -> int | None:
1024
+ """Get the maximum step for a specific run, or None if no logs exist."""
1025
+ db_path = SQLiteStorage.get_project_db_path(project)
1026
+ if not db_path.exists():
1027
+ return None
1028
+
1029
+ try:
1030
+ with SQLiteStorage._get_connection(db_path) as conn:
1031
+ cursor = conn.cursor()
1032
+ cursor.execute(
1033
+ "SELECT MAX(step) FROM metrics WHERE run_name = ?", (run,)
1034
+ )
1035
+ result = cursor.fetchone()[0]
1036
+ return result
1037
+ except sqlite3.OperationalError as e:
1038
+ if "no such table: metrics" in str(e):
1039
+ return None
1040
+ raise
1041
+
1042
+ @staticmethod
1043
+ def get_run_config(project: str, run: str) -> dict | None:
1044
+ """Get configuration for a specific run."""
1045
+ db_path = SQLiteStorage.get_project_db_path(project)
1046
+ if not db_path.exists():
1047
+ return None
1048
+
1049
+ with SQLiteStorage._get_connection(db_path) as conn:
1050
+ cursor = conn.cursor()
1051
+ try:
1052
+ cursor.execute(
1053
+ """
1054
+ SELECT config FROM configs WHERE run_name = ?
1055
+ """,
1056
+ (run,),
1057
+ )
1058
+
1059
+ row = cursor.fetchone()
1060
+ if row:
1061
+ config = orjson.loads(row["config"])
1062
+ return deserialize_values(config)
1063
+ return None
1064
+ except sqlite3.OperationalError as e:
1065
+ if "no such table: configs" in str(e):
1066
+ return None
1067
+ raise
1068
+
1069
+ @staticmethod
1070
+ def delete_run(project: str, run: str) -> bool:
1071
+ """Delete a run from the database (metrics, config, and system_metrics)."""
1072
+ db_path = SQLiteStorage.get_project_db_path(project)
1073
+ if not db_path.exists():
1074
+ return False
1075
+
1076
+ with SQLiteStorage._get_process_lock(project):
1077
+ with SQLiteStorage._get_connection(db_path) as conn:
1078
+ cursor = conn.cursor()
1079
+ try:
1080
+ cursor.execute("DELETE FROM metrics WHERE run_name = ?", (run,))
1081
+ cursor.execute("DELETE FROM configs WHERE run_name = ?", (run,))
1082
+ try:
1083
+ cursor.execute(
1084
+ "DELETE FROM system_metrics WHERE run_name = ?", (run,)
1085
+ )
1086
+ except sqlite3.OperationalError:
1087
+ pass
1088
+ try:
1089
+ cursor.execute("DELETE FROM alerts WHERE run_name = ?", (run,))
1090
+ except sqlite3.OperationalError:
1091
+ pass
1092
+ conn.commit()
1093
+ return True
1094
+ except sqlite3.Error:
1095
+ return False
1096
+
1097
+ @staticmethod
1098
+ def _update_media_paths(obj, old_prefix, new_prefix):
1099
+ """Update media file paths in nested data structures."""
1100
+ if isinstance(obj, dict):
1101
+ if obj.get("_type") in [
1102
+ "trackio.image",
1103
+ "trackio.video",
1104
+ "trackio.audio",
1105
+ ]:
1106
+ old_path = obj.get("file_path", "")
1107
+ if isinstance(old_path, str):
1108
+ normalized_path = old_path.replace("\\", "/")
1109
+ if normalized_path.startswith(old_prefix):
1110
+ new_path = normalized_path.replace(old_prefix, new_prefix, 1)
1111
+ return {**obj, "file_path": new_path}
1112
+ return {
1113
+ key: SQLiteStorage._update_media_paths(value, old_prefix, new_prefix)
1114
+ for key, value in obj.items()
1115
+ }
1116
+ elif isinstance(obj, list):
1117
+ return [
1118
+ SQLiteStorage._update_media_paths(item, old_prefix, new_prefix)
1119
+ for item in obj
1120
+ ]
1121
+ return obj
1122
+
1123
+ @staticmethod
1124
+ def _rewrite_metrics_rows(metrics_rows, new_run_name, old_prefix, new_prefix):
1125
+ """Deserialize metrics rows, update media paths, and reserialize."""
1126
+ result = []
1127
+ for row in metrics_rows:
1128
+ metrics_data = orjson.loads(row["metrics"])
1129
+ metrics_deserialized = deserialize_values(metrics_data)
1130
+ updated = SQLiteStorage._update_media_paths(
1131
+ metrics_deserialized, old_prefix, new_prefix
1132
+ )
1133
+ result.append(
1134
+ (
1135
+ row["timestamp"],
1136
+ new_run_name,
1137
+ row["step"],
1138
+ orjson.dumps(serialize_values(updated)),
1139
+ )
1140
+ )
1141
+ return result
1142
+
1143
+ @staticmethod
1144
+ def _move_media_dir(source: Path, target: Path):
1145
+ """Move a media directory from source to target."""
1146
+ if source.exists():
1147
+ target.parent.mkdir(parents=True, exist_ok=True)
1148
+ if target.exists():
1149
+ shutil.rmtree(target)
1150
+ shutil.move(str(source), str(target))
1151
+
1152
    @staticmethod
    def rename_run(project: str, old_name: str, new_name: str) -> None:
        """Rename a run within the same project.

        Rewrites every metrics row (including media paths embedded in the
        serialized payloads), updates configs/system_metrics/alerts in place,
        then moves the run's media directory on disk.

        Raises:
            ValueError: If the new name is empty, the old run doesn't exist,
                or a run with the new name already exists.
            RuntimeError: If the database operation fails.
        """
        if not new_name or not new_name.strip():
            raise ValueError("New run name cannot be empty")

        new_name = new_name.strip()

        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            raise ValueError(f"Project '{project}' does not exist")

        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                cursor = conn.cursor()

                # Validate existence and name collision before touching data.
                cursor.execute(
                    "SELECT COUNT(*) FROM metrics WHERE run_name = ?", (old_name,)
                )
                if cursor.fetchone()[0] == 0:
                    raise ValueError(
                        f"Run '{old_name}' does not exist in project '{project}'"
                    )

                cursor.execute(
                    "SELECT COUNT(*) FROM metrics WHERE run_name = ?", (new_name,)
                )
                if cursor.fetchone()[0] > 0:
                    raise ValueError(
                        f"A run named '{new_name}' already exists in project '{project}'"
                    )

                try:
                    cursor.execute(
                        "SELECT timestamp, step, metrics FROM metrics WHERE run_name = ?",
                        (old_name,),
                    )
                    metrics_rows = cursor.fetchall()

                    # Media paths are stored as "<project>/<run>/..." inside
                    # the serialized metrics, so the payloads change too.
                    old_prefix = f"{project}/{old_name}/"
                    new_prefix = f"{project}/{new_name}/"

                    updated_rows = SQLiteStorage._rewrite_metrics_rows(
                        metrics_rows, new_name, old_prefix, new_prefix
                    )

                    # Delete + re-insert (rather than UPDATE) because the JSON
                    # blob itself is rewritten, not just run_name.
                    cursor.execute(
                        "DELETE FROM metrics WHERE run_name = ?", (old_name,)
                    )
                    cursor.executemany(
                        "INSERT INTO metrics (timestamp, run_name, step, metrics) VALUES (?, ?, ?, ?)",
                        updated_rows,
                    )

                    cursor.execute(
                        "UPDATE configs SET run_name = ? WHERE run_name = ?",
                        (new_name, old_name),
                    )

                    # These tables may not exist in older databases.
                    try:
                        cursor.execute(
                            "UPDATE system_metrics SET run_name = ? WHERE run_name = ?",
                            (new_name, old_name),
                        )
                    except sqlite3.OperationalError:
                        pass

                    try:
                        cursor.execute(
                            "UPDATE alerts SET run_name = ? WHERE run_name = ?",
                            (new_name, old_name),
                        )
                    except sqlite3.OperationalError:
                        pass

                    conn.commit()

                    # NOTE(review): the media move happens after the commit —
                    # if it fails, the DB references the new path while files
                    # remain under the old one. Confirm this is acceptable.
                    SQLiteStorage._move_media_dir(
                        MEDIA_DIR / project / old_name,
                        MEDIA_DIR / project / new_name,
                    )
                except sqlite3.Error as e:
                    raise RuntimeError(
                        f"Database error while renaming run '{old_name}' to '{new_name}': {e}"
                    ) from e
1243
+
1244
    @staticmethod
    def move_run(project: str, run: str, new_project: str) -> bool:
        """Move a run from one project to another.

        Copy-then-delete: all of the run's rows are read from the source DB,
        inserted into the target DB (with media paths rewritten to the new
        project prefix), the media directory is moved on disk, and only then
        are the source rows deleted. Returns False when the source DB or the
        run has no data to move, True otherwise.
        """
        source_db_path = SQLiteStorage.get_project_db_path(project)
        if not source_db_path.exists():
            return False

        # Ensure the destination database and schema exist.
        target_db_path = SQLiteStorage.init_db(new_project)

        # NOTE(review): locks are taken source-then-target; two concurrent
        # moves in opposite directions could deadlock — confirm callers
        # serialize these operations.
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_process_lock(new_project):
                with SQLiteStorage._get_connection(source_db_path) as source_conn:
                    source_cursor = source_conn.cursor()

                    # Gather everything belonging to the run from the source.
                    source_cursor.execute(
                        "SELECT timestamp, step, metrics FROM metrics WHERE run_name = ?",
                        (run,),
                    )
                    metrics_rows = source_cursor.fetchall()

                    source_cursor.execute(
                        "SELECT config, created_at FROM configs WHERE run_name = ?",
                        (run,),
                    )
                    config_row = source_cursor.fetchone()

                    # Optional tables: tolerate their absence on older schemas.
                    try:
                        source_cursor.execute(
                            "SELECT timestamp, metrics FROM system_metrics WHERE run_name = ?",
                            (run,),
                        )
                        system_metrics_rows = source_cursor.fetchall()
                    except sqlite3.OperationalError:
                        system_metrics_rows = []

                    try:
                        source_cursor.execute(
                            "SELECT timestamp, title, text, level, step, alert_id FROM alerts WHERE run_name = ?",
                            (run,),
                        )
                        alert_rows = source_cursor.fetchall()
                    except sqlite3.OperationalError:
                        alert_rows = []

                    # Nothing to move (alerts alone do not count as data).
                    if not metrics_rows and not config_row and not system_metrics_rows:
                        return False

                    with SQLiteStorage._get_connection(target_db_path) as target_conn:
                        target_cursor = target_conn.cursor()

                        # Media paths embed "<project>/<run>/", so the metric
                        # payloads must be rewritten for the new project.
                        old_prefix = f"{project}/{run}/"
                        new_prefix = f"{new_project}/{run}/"
                        updated_rows = SQLiteStorage._rewrite_metrics_rows(
                            metrics_rows, run, old_prefix, new_prefix
                        )

                        target_cursor.executemany(
                            "INSERT INTO metrics (timestamp, run_name, step, metrics) VALUES (?, ?, ?, ?)",
                            updated_rows,
                        )

                        if config_row:
                            target_cursor.execute(
                                """
                                INSERT OR REPLACE INTO configs (run_name, config, created_at)
                                VALUES (?, ?, ?)
                                """,
                                (run, config_row["config"], config_row["created_at"]),
                            )

                        for row in system_metrics_rows:
                            try:
                                target_cursor.execute(
                                    """
                                    INSERT INTO system_metrics (timestamp, run_name, metrics)
                                    VALUES (?, ?, ?)
                                    """,
                                    (row["timestamp"], run, row["metrics"]),
                                )
                            except sqlite3.OperationalError:
                                pass

                        for row in alert_rows:
                            try:
                                target_cursor.execute(
                                    """
                                    INSERT OR IGNORE INTO alerts (timestamp, run_name, title, text, level, step, alert_id)
                                    VALUES (?, ?, ?, ?, ?, ?, ?)
                                    """,
                                    (
                                        row["timestamp"],
                                        run,
                                        row["title"],
                                        row["text"],
                                        row["level"],
                                        row["step"],
                                        row["alert_id"],
                                    ),
                                )
                            except sqlite3.OperationalError:
                                pass

                        # Commit the copy before touching disk or the source DB,
                        # so a later failure cannot lose the run's data.
                        target_conn.commit()

                        SQLiteStorage._move_media_dir(
                            MEDIA_DIR / project / run,
                            MEDIA_DIR / new_project / run,
                        )

                        # Only now remove the run from the source database.
                        source_cursor.execute(
                            "DELETE FROM metrics WHERE run_name = ?", (run,)
                        )
                        source_cursor.execute(
                            "DELETE FROM configs WHERE run_name = ?", (run,)
                        )
                        try:
                            source_cursor.execute(
                                "DELETE FROM system_metrics WHERE run_name = ?", (run,)
                            )
                        except sqlite3.OperationalError:
                            pass
                        try:
                            source_cursor.execute(
                                "DELETE FROM alerts WHERE run_name = ?", (run,)
                            )
                        except sqlite3.OperationalError:
                            pass
                        source_conn.commit()

        return True
1374
+
1375
+ @staticmethod
1376
+ def get_all_run_configs(project: str) -> dict[str, dict]:
1377
+ """Get configurations for all runs in a project."""
1378
+ db_path = SQLiteStorage.get_project_db_path(project)
1379
+ if not db_path.exists():
1380
+ return {}
1381
+
1382
+ with SQLiteStorage._get_connection(db_path) as conn:
1383
+ cursor = conn.cursor()
1384
+ try:
1385
+ cursor.execute(
1386
+ """
1387
+ SELECT run_name, config FROM configs
1388
+ """
1389
+ )
1390
+
1391
+ results = {}
1392
+ for row in cursor.fetchall():
1393
+ config = orjson.loads(row["config"])
1394
+ results[row["run_name"]] = deserialize_values(config)
1395
+ return results
1396
+ except sqlite3.OperationalError as e:
1397
+ if "no such table: configs" in str(e):
1398
+ return {}
1399
+ raise
1400
+
1401
    @staticmethod
    def get_metric_values(
        project: str,
        run: str,
        metric_name: str,
        step: int | None = None,
        around_step: int | None = None,
        at_time: str | None = None,
        window: int | float | None = None,
    ) -> list[dict]:
        """Get values for a specific metric in a project/run with optional filtering.

        Filtering modes:
        - step: return the single row at exactly this step
        - around_step + window: return rows where step is in [around_step - window, around_step + window]
        - at_time + window: return rows within ±window seconds of the ISO timestamp
        - No filters: return all rows

        Returns a list of ``{"timestamp", "step", "value"}`` dicts ordered by
        timestamp; rows whose metrics blob lacks ``metric_name`` are skipped.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return []

        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            query = "SELECT timestamp, step, metrics FROM metrics WHERE run_name = ?"
            params: list = [run]

            # At most one filtering mode applies; exact `step` takes precedence.
            if step is not None:
                query += " AND step = ?"
                params.append(step)
            elif around_step is not None and window is not None:
                query += " AND step >= ? AND step <= ?"
                params.extend([around_step - int(window), around_step + int(window)])
            elif at_time is not None and window is not None:
                # The ± window is computed by SQLite's datetime() arithmetic.
                query += (
                    " AND timestamp >= datetime(?, '-' || ? || ' seconds')"
                    " AND timestamp <= datetime(?, '+' || ? || ' seconds')"
                )
                params.extend([at_time, int(window), at_time, int(window)])

            query += " ORDER BY timestamp"
            cursor.execute(query, params)

            rows = cursor.fetchall()
            results = []
            for row in rows:
                # Metrics are stored as a JSON blob per logged row.
                metrics = orjson.loads(row["metrics"])
                metrics = deserialize_values(metrics)
                if metric_name in metrics:
                    results.append(
                        {
                            "timestamp": row["timestamp"],
                            "step": row["step"],
                            "value": metrics[metric_name],
                        }
                    )
            return results
1458
+
1459
    @staticmethod
    def get_snapshot(
        project: str,
        run: str,
        step: int | None = None,
        around_step: int | None = None,
        at_time: str | None = None,
        window: int | float | None = None,
    ) -> dict[str, list[dict]]:
        """Get all metrics at/around a point in time or step.

        Same filtering semantics as ``get_metric_values`` (exact step, step
        window, or timestamp window; `step` takes precedence), but instead of
        extracting a single metric, every metric present in the matched rows
        is collected.

        Returns a dict mapping metric names to lists of {timestamp, step, value}.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return {}

        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            query = "SELECT timestamp, step, metrics FROM metrics WHERE run_name = ?"
            params: list = [run]

            if step is not None:
                query += " AND step = ?"
                params.append(step)
            elif around_step is not None and window is not None:
                query += " AND step >= ? AND step <= ?"
                params.extend([around_step - int(window), around_step + int(window)])
            elif at_time is not None and window is not None:
                # The ± window is computed by SQLite's datetime() arithmetic.
                query += (
                    " AND timestamp >= datetime(?, '-' || ? || ' seconds')"
                    " AND timestamp <= datetime(?, '+' || ? || ' seconds')"
                )
                params.extend([at_time, int(window), at_time, int(window)])

            query += " ORDER BY timestamp"
            cursor.execute(query, params)

            result: dict[str, list[dict]] = {}
            for row in cursor.fetchall():
                metrics = orjson.loads(row["metrics"])
                metrics = deserialize_values(metrics)
                # Fan out the row into one entry per metric key it contains.
                for key, value in metrics.items():
                    if key not in result:
                        result[key] = []
                    result[key].append(
                        {
                            "timestamp": row["timestamp"],
                            "step": row["step"],
                            "value": value,
                        }
                    )
            return result
1512
+
1513
    @staticmethod
    def get_all_metrics_for_run(project: str, run: str) -> list[str]:
        """Get all metric names for a specific project/run.

        Thin wrapper over ``_get_metric_names`` for the ``metrics`` table;
        the bookkeeping keys "timestamp" and "step" are excluded.
        """
        return SQLiteStorage._get_metric_names(
            project, run, "metrics", exclude_keys={"timestamp", "step"}
        )
1519
+
1520
    @staticmethod
    def _get_metric_names(
        project: str, run: str, table: str, exclude_keys: set[str]
    ) -> list[str]:
        """Collect the sorted set of metric keys logged for a run.

        Scans every JSON metrics blob for ``run`` in ``table`` and returns the
        union of keys, minus ``exclude_keys``. Returns [] when the database or
        the table does not exist. ``table`` is an internal identifier (never
        user input), so the f-string interpolation below is safe.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return []

        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            try:
                cursor.execute(
                    f"""
                    SELECT metrics
                    FROM {table}
                    WHERE run_name = ?
                    ORDER BY timestamp
                    """,
                    (run,),
                )

                rows = cursor.fetchall()
                all_metrics = set()
                for row in rows:
                    metrics = orjson.loads(row["metrics"])
                    metrics = deserialize_values(metrics)
                    for key in metrics.keys():
                        if key not in exclude_keys:
                            all_metrics.add(key)
                return sorted(list(all_metrics))
            except sqlite3.OperationalError as e:
                # Missing table simply means nothing was logged yet.
                if f"no such table: {table}" in str(e):
                    return []
                raise
1554
+
1555
    @staticmethod
    def set_project_metadata(project: str, key: str, value: str) -> None:
        """Upsert a key/value pair into the project's metadata table.

        Creates the database if needed; the process lock serializes writers
        across processes sharing the same local storage.
        """
        db_path = SQLiteStorage.init_db(project)
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                conn.execute(
                    "INSERT OR REPLACE INTO project_metadata (key, value) VALUES (?, ?)",
                    (key, value),
                )
                conn.commit()
1565
+
1566
    @staticmethod
    def get_project_metadata(project: str, key: str) -> str | None:
        """Read a metadata value for the project, or None if absent.

        Returns None when the database, the project_metadata table, or the
        key itself does not exist.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return None
        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            try:
                cursor.execute(
                    "SELECT value FROM project_metadata WHERE key = ?", (key,)
                )
                row = cursor.fetchone()
                return row[0] if row else None
            except sqlite3.OperationalError:
                # Table may not exist in older databases; treat as no value.
                return None
1581
+
1582
    @staticmethod
    def get_space_id(project: str) -> str | None:
        """Return the Hugging Face Space id associated with the project, if any."""
        return SQLiteStorage.get_project_metadata(project, "space_id")
1585
+
1586
+ @staticmethod
1587
+ def has_pending_data(project: str) -> bool:
1588
+ db_path = SQLiteStorage.get_project_db_path(project)
1589
+ if not db_path.exists():
1590
+ return False
1591
+ with SQLiteStorage._get_connection(db_path) as conn:
1592
+ cursor = conn.cursor()
1593
+ try:
1594
+ cursor.execute(
1595
+ "SELECT EXISTS(SELECT 1 FROM metrics WHERE space_id IS NOT NULL LIMIT 1)"
1596
+ )
1597
+ if cursor.fetchone()[0]:
1598
+ return True
1599
+ except sqlite3.OperationalError:
1600
+ pass
1601
+ try:
1602
+ cursor.execute(
1603
+ "SELECT EXISTS(SELECT 1 FROM system_metrics WHERE space_id IS NOT NULL LIMIT 1)"
1604
+ )
1605
+ if cursor.fetchone()[0]:
1606
+ return True
1607
+ except sqlite3.OperationalError:
1608
+ pass
1609
+ try:
1610
+ cursor.execute("SELECT EXISTS(SELECT 1 FROM pending_uploads LIMIT 1)")
1611
+ if cursor.fetchone()[0]:
1612
+ return True
1613
+ except sqlite3.OperationalError:
1614
+ pass
1615
+ return False
1616
+
1617
    @staticmethod
    def get_pending_logs(project: str) -> dict | None:
        """Fetch metrics rows still awaiting upload to a Space.

        Wrapper over ``_get_pending`` for the ``metrics`` table; includes the
        per-row ``step`` and a placeholder ``config`` field in each entry.
        """
        return SQLiteStorage._get_pending(
            project, "metrics", extra_fields=["step"], include_config=True
        )
1622
+
1623
    @staticmethod
    def clear_pending_logs(project: str, metric_ids: list[int]) -> None:
        """Delete metrics rows (by id) that have been successfully uploaded."""
        SQLiteStorage._clear_pending(project, "metrics", metric_ids)
1626
+
1627
    @staticmethod
    def get_pending_system_logs(project: str) -> dict | None:
        """Fetch system_metrics rows still awaiting upload to a Space."""
        return SQLiteStorage._get_pending(project, "system_metrics")
1630
+
1631
    @staticmethod
    def _get_pending(
        project: str,
        table: str,
        extra_fields: list[str] | None = None,
        include_config: bool = False,
    ) -> dict | None:
        """Fetch rows from ``table`` whose ``space_id`` is set (i.e. pending upload).

        Args:
            project: Project whose database is queried.
            table: Internal table name (never user input; safe to interpolate).
            extra_fields: Additional column names to select and copy into each
                log entry (e.g. ``["step"]``).
            include_config: If True, add a ``config: None`` placeholder to
                each entry.

        Returns:
            ``{"logs": [...], "ids": [...], "space_id": ...}`` or None when the
            database/table is missing or no rows are pending. ``ids`` are the
            row ids, for later deletion via ``_clear_pending``.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return None
        extra_cols = ", ".join(extra_fields) + ", " if extra_fields else ""
        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            try:
                cursor.execute(
                    f"""SELECT id, timestamp, run_name, {extra_cols}metrics, log_id, space_id
                    FROM {table} WHERE space_id IS NOT NULL"""
                )
            except sqlite3.OperationalError:
                # Table absent in older databases: nothing pending.
                return None
            rows = cursor.fetchall()
            if not rows:
                return None
            logs = []
            ids = []
            for row in rows:
                metrics = deserialize_values(orjson.loads(row["metrics"]))
                entry = {
                    "project": project,
                    "run": row["run_name"],
                    "metrics": metrics,
                    "timestamp": row["timestamp"],
                    "log_id": row["log_id"],
                }
                for field in extra_fields or []:
                    entry[field] = row[field]
                if include_config:
                    entry["config"] = None
                logs.append(entry)
                ids.append(row["id"])
            # All pending rows in one project share the same target space_id.
            return {"logs": logs, "ids": ids, "space_id": rows[0]["space_id"]}
1672
+
1673
    @staticmethod
    def clear_pending_system_logs(project: str, metric_ids: list[int]) -> None:
        """Delete system_metrics rows (by id) that have been successfully uploaded."""
        SQLiteStorage._clear_pending(project, "system_metrics", metric_ids)
1676
+
1677
    @staticmethod
    def _clear_pending(project: str, table: str, ids: list[int]) -> None:
        """Delete rows with the given ids from ``table``.

        No-op when ``ids`` is empty or the database file does not exist.
        ``table`` is an internal identifier and ``ids`` are bound as
        parameters, so the dynamically-built query is safe.
        """
        if not ids:
            return
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                # One placeholder per id; values are passed as parameters.
                placeholders = ",".join("?" * len(ids))
                conn.execute(
                    f"DELETE FROM {table} WHERE id IN ({placeholders})",
                    ids,
                )
                conn.commit()
1692
+
1693
    @staticmethod
    def get_pending_uploads(project: str) -> dict | None:
        """Fetch queued file uploads for the project.

        Returns ``{"uploads": [...], "ids": [...], "space_id": ...}`` or None
        when the database/table is missing or the queue is empty. ``ids`` can
        be passed to ``clear_pending_uploads`` after a successful upload.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return None
        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            try:
                cursor.execute(
                    """SELECT id, space_id, run_name, step, file_path, relative_path
                    FROM pending_uploads"""
                )
            except sqlite3.OperationalError:
                # Table absent in older databases: nothing queued.
                return None
            rows = cursor.fetchall()
            if not rows:
                return None
            uploads = []
            ids = []
            for row in rows:
                uploads.append(
                    {
                        "project": project,
                        "run": row["run_name"],
                        "step": row["step"],
                        "file_path": row["file_path"],
                        "relative_path": row["relative_path"],
                    }
                )
                ids.append(row["id"])
            # All queued uploads in one project target the same space_id.
            return {"uploads": uploads, "ids": ids, "space_id": rows[0]["space_id"]}
1724
+
1725
    @staticmethod
    def clear_pending_uploads(project: str, upload_ids: list[int]) -> None:
        """Delete queued uploads (by id) that have been successfully pushed.

        No-op when ``upload_ids`` is empty or the database file is missing.
        """
        if not upload_ids:
            return
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                # One placeholder per id; values are bound as parameters.
                placeholders = ",".join("?" * len(upload_ids))
                conn.execute(
                    f"DELETE FROM pending_uploads WHERE id IN ({placeholders})",
                    upload_ids,
                )
                conn.commit()
1740
+
1741
    @staticmethod
    def add_pending_upload(
        project: str,
        space_id: str,
        run_name: str | None,
        step: int | None,
        file_path: str,
        relative_path: str | None,
    ) -> None:
        """Queue a local file for later upload to a Space.

        Args:
            project: Project whose database receives the queue entry.
            space_id: Target Hugging Face Space id.
            run_name: Run the file belongs to, if any.
            step: Training step associated with the file, if any.
            file_path: Local path to the file to upload.
            relative_path: Destination path relative to the Space repo, if any.
        """
        db_path = SQLiteStorage.init_db(project)
        with SQLiteStorage._get_process_lock(project):
            with SQLiteStorage._get_connection(db_path) as conn:
                conn.execute(
                    """INSERT INTO pending_uploads
                    (space_id, run_name, step, file_path, relative_path, created_at)
                    VALUES (?, ?, ?, ?, ?, ?)""",
                    (
                        space_id,
                        run_name,
                        step,
                        file_path,
                        relative_path,
                        # Timezone-aware UTC timestamp for portability.
                        datetime.now(timezone.utc).isoformat(),
                    ),
                )
                conn.commit()
1767
+
1768
    @staticmethod
    def get_all_logs_for_sync(project: str) -> list[dict]:
        """Return every metrics row for the project, ordered for syncing.

        Wrapper over ``_get_all_for_sync`` that includes the ``step`` column
        and a placeholder ``config`` field in each entry.
        """
        return SQLiteStorage._get_all_for_sync(
            project,
            "metrics",
            order_by="run_name, step",
            extra_fields=["step"],
            include_config=True,
        )
1776
+ )
1777
+
1778
+ @staticmethod
1779
+ def get_all_system_logs_for_sync(project: str) -> list[dict]:
1780
+ return SQLiteStorage._get_all_for_sync(
1781
+ project, "system_metrics", order_by="run_name, timestamp"
1782
+ )
1783
+
1784
    @staticmethod
    def _get_all_for_sync(
        project: str,
        table: str,
        order_by: str,
        extra_fields: list[str] | None = None,
        include_config: bool = False,
    ) -> list[dict]:
        """Dump every row of ``table`` as sync-ready log entries.

        Args:
            project: Project whose database is read.
            table: Internal table name (never user input; safe to interpolate).
            order_by: ORDER BY clause body (internal, not user input).
            extra_fields: Additional columns to select and copy into entries.
            include_config: If True, add a ``config: None`` placeholder.

        Returns:
            A list of log-entry dicts; [] when the database or table is missing.
        """
        db_path = SQLiteStorage.get_project_db_path(project)
        if not db_path.exists():
            return []
        extra_cols = ", ".join(extra_fields) + ", " if extra_fields else ""
        with SQLiteStorage._get_connection(db_path) as conn:
            cursor = conn.cursor()
            try:
                cursor.execute(
                    f"""SELECT timestamp, run_name, {extra_cols}metrics, log_id
                    FROM {table} ORDER BY {order_by}"""
                )
            except sqlite3.OperationalError:
                # Table absent in older databases: nothing to sync.
                return []
            rows = cursor.fetchall()
            results = []
            for row in rows:
                metrics = deserialize_values(orjson.loads(row["metrics"]))
                entry = {
                    "project": project,
                    "run": row["run_name"],
                    "metrics": metrics,
                    "timestamp": row["timestamp"],
                    "log_id": row["log_id"],
                }
                for field in extra_fields or []:
                    entry[field] = row[field]
                if include_config:
                    entry["config"] = None
                results.append(entry)
            return results
trackio/table.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Any, Literal
3
+
4
+ from pandas import DataFrame
5
+
6
+ from trackio.media.media import TrackioMedia
7
+ from trackio.utils import MEDIA_DIR
8
+
9
+
10
+ class Table:
11
+ """
12
+ Initializes a Table object.
13
+
14
+ Tables can be used to log tabular data including images, numbers, and text.
15
+
16
+ Args:
17
+ columns (`list[str]`, *optional*):
18
+ Names of the columns in the table. Optional if `data` is provided. Not
19
+ expected if `dataframe` is provided. Currently ignored.
20
+ data (`list[list[Any]]`, *optional*):
21
+ 2D row-oriented array of values. Each value can be a number, a string
22
+ (treated as Markdown and truncated if too long), or a `Trackio.Image` or
23
+ list of `Trackio.Image` objects.
24
+ dataframe (`pandas.DataFrame`, *optional*):
25
+ DataFrame used to create the table. When set, `data` and `columns`
26
+ arguments are ignored.
27
+ rows (`list[list[Any]]`, *optional*):
28
+ Currently ignored.
29
+ optional (`bool` or `list[bool]`, *optional*, defaults to `True`):
30
+ Currently ignored.
31
+ allow_mixed_types (`bool`, *optional*, defaults to `False`):
32
+ Currently ignored.
33
+ log_mode: (`Literal["IMMUTABLE", "MUTABLE", "INCREMENTAL"]` or `None`, *optional*, defaults to `"IMMUTABLE"`):
34
+ Currently ignored.
35
+ """
36
+
37
+ TYPE = "trackio.table"
38
+
39
+ def __init__(
40
+ self,
41
+ columns: list[str] | None = None,
42
+ data: list[list[Any]] | None = None,
43
+ dataframe: DataFrame | None = None,
44
+ rows: list[list[Any]] | None = None,
45
+ optional: bool | list[bool] = True,
46
+ allow_mixed_types: bool = False,
47
+ log_mode: Literal["IMMUTABLE", "MUTABLE", "INCREMENTAL"] | None = "IMMUTABLE",
48
+ ):
49
+ # TODO: implement support for columns, dtype, optional, allow_mixed_types, and log_mode.
50
+ # for now (like `rows`) they are included for API compat but don't do anything.
51
+ if dataframe is None:
52
+ self.data = DataFrame(data) if data is not None else DataFrame()
53
+ else:
54
+ self.data = dataframe
55
+
56
+ def _has_media_objects(self, dataframe: DataFrame) -> bool:
57
+ """Check if dataframe contains any TrackioMedia objects or lists of TrackioMedia objects."""
58
+ for col in dataframe.columns:
59
+ if dataframe[col].apply(lambda x: isinstance(x, TrackioMedia)).any():
60
+ return True
61
+ if (
62
+ dataframe[col]
63
+ .apply(
64
+ lambda x: (
65
+ isinstance(x, list)
66
+ and len(x) > 0
67
+ and isinstance(x[0], TrackioMedia)
68
+ )
69
+ )
70
+ .any()
71
+ ):
72
+ return True
73
+ return False
74
+
75
+ def _process_data(self, project: str, run: str, step: int = 0):
76
+ """Convert dataframe to dict format, processing any TrackioMedia objects if present."""
77
+ df = self.data
78
+ if not self._has_media_objects(df):
79
+ return df.to_dict(orient="records")
80
+
81
+ processed_df = df.copy()
82
+ for col in processed_df.columns:
83
+ for idx in processed_df.index:
84
+ value = processed_df.at[idx, col]
85
+ if isinstance(value, TrackioMedia):
86
+ value._save(project, run, step)
87
+ processed_df.at[idx, col] = value._to_dict()
88
+ if (
89
+ isinstance(value, list)
90
+ and len(value) > 0
91
+ and isinstance(value[0], TrackioMedia)
92
+ ):
93
+ [v._save(project, run, step) for v in value]
94
+ processed_df.at[idx, col] = [v._to_dict() for v in value]
95
+
96
+ return processed_df.to_dict(orient="records")
97
+
98
+ @staticmethod
99
+ def to_display_format(table_data: list[dict]) -> list[dict]:
100
+ """
101
+ Converts stored table data to display format for UI rendering.
102
+
103
+ Note:
104
+ This does not use the `self.data` attribute, but instead uses the
105
+ `table_data` parameter, which is what the UI receives.
106
+
107
+ Args:
108
+ table_data (`list[dict]`):
109
+ List of dictionaries representing table rows (from stored `_value`).
110
+
111
+ Returns:
112
+ `list[dict]`: Table data with images converted to markdown syntax and long
113
+ text truncated.
114
+ """
115
+ truncate_length = int(os.getenv("TRACKIO_TABLE_TRUNCATE_LENGTH", "250"))
116
+
117
+ def convert_image_to_markdown(image_data: dict) -> str:
118
+ relative_path = image_data.get("file_path", "")
119
+ caption = image_data.get("caption", "")
120
+ absolute_path = MEDIA_DIR / relative_path
121
+ return f'<img src="/gradio_api/file={absolute_path}" alt="{caption}" />'
122
+
123
+ processed_data = []
124
+ for row in table_data:
125
+ processed_row = {}
126
+ for key, value in row.items():
127
+ if isinstance(value, dict) and value.get("_type") == "trackio.image":
128
+ processed_row[key] = convert_image_to_markdown(value)
129
+ elif (
130
+ isinstance(value, list)
131
+ and len(value) > 0
132
+ and isinstance(value[0], dict)
133
+ and value[0].get("_type") == "trackio.image"
134
+ ):
135
+ # This assumes that if the first item is an image, all items are images. Ok for now since we don't support mixed types in a single cell.
136
+ processed_row[key] = (
137
+ '<div style="display: flex; gap: 10px;">'
138
+ + "".join([convert_image_to_markdown(item) for item in value])
139
+ + "</div>"
140
+ )
141
+ elif isinstance(value, str) and len(value) > truncate_length:
142
+ truncated = value[:truncate_length]
143
+ full_text = value.replace("<", "&lt;").replace(">", "&gt;")
144
+ processed_row[key] = (
145
+ f'<details style="display: inline;">'
146
+ f'<summary style="display: inline; cursor: pointer;">{truncated}…<span><em>(truncated, click to expand)</em></span></summary>'
147
+ f'<div style="margin-top: 10px; padding: 10px; background: #f5f5f5; border-radius: 4px; max-height: 400px; overflow: auto;">'
148
+ f'<pre style="white-space: pre-wrap; word-wrap: break-word; margin: 0;">{full_text}</pre>'
149
+ f"</div>"
150
+ f"</details>"
151
+ )
152
+ else:
153
+ processed_row[key] = value
154
+ processed_data.append(processed_row)
155
+ return processed_data
156
+
157
+ def _to_dict(self, project: str, run: str, step: int = 0):
158
+ """
159
+ Converts the table to a dictionary representation.
160
+
161
+ Args:
162
+ project (`str`):
163
+ Project name for saving media files.
164
+ run (`str`):
165
+ Run name for saving media files.
166
+ step (`int`, *optional*, defaults to `0`):
167
+ Step number for saving media files.
168
+ """
169
+ data = self._process_data(project, run, step)
170
+ return {
171
+ "_type": self.TYPE,
172
+ "_value": data,
173
+ }
trackio/typehints.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, TypedDict
2
+
3
+ from gradio import FileData
4
+
5
+
6
class LogEntry(TypedDict, total=False):
    """A single metrics log record for a project/run (all keys optional)."""

    project: str
    run: str
    metrics: dict[str, Any]
    step: int | None
    config: dict[str, Any] | None
    log_id: str | None
13
+
14
+
15
class SystemLogEntry(TypedDict, total=False):
    """A system-metrics log record for a project/run (all keys optional)."""

    project: str
    run: str
    metrics: dict[str, Any]
    timestamp: str
    log_id: str | None
21
+
22
+
23
class AlertEntry(TypedDict, total=False):
    """An alert raised during a run (all keys optional)."""

    project: str
    run: str
    title: str
    text: str | None
    level: str
    step: int | None
    timestamp: str
    alert_id: str | None
32
+
33
+
34
class UploadEntry(TypedDict):
    """A file uploaded through the API on behalf of a project/run."""

    project: str
    run: str | None
    step: int | None
    relative_path: str | None
    uploaded_file: FileData
trackio/utils.py ADDED
@@ -0,0 +1,919 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import os
3
+ import re
4
+ import secrets
5
+ import time
6
+ from datetime import datetime, timezone
7
+ from functools import lru_cache
8
+ from pathlib import Path
9
+ from typing import TYPE_CHECKING
10
+ from urllib.parse import urlencode
11
+
12
+ import huggingface_hub
13
+ import numpy as np
14
+ import pandas as pd
15
+ from huggingface_hub.constants import HF_HOME
16
+
17
+ if TYPE_CHECKING:
18
+ from trackio.commit_scheduler import CommitScheduler
19
+ from trackio.dummy_commit_scheduler import DummyCommitScheduler
20
+
21
# Keys that the backend uses internally and that therefore must not be used
# as metric names when logging.
RESERVED_KEYS = ["project", "run", "timestamp", "step", "time", "metrics"]

# Directory bundled with the package that holds the logo image assets.
TRACKIO_LOGO_DIR = Path(__file__).parent / "assets"
24
+
25
+
26
def get_logo_urls() -> dict[str, str]:
    """Get logo URLs from environment variables or use defaults."""
    defaults = {
        "light": f"/gradio_api/file={TRACKIO_LOGO_DIR}/trackio_logo_type_light_transparent.png",
        "dark": f"/gradio_api/file={TRACKIO_LOGO_DIR}/trackio_logo_type_dark_transparent.png",
    }
    # TRACKIO_LOGO_LIGHT_URL / TRACKIO_LOGO_DARK_URL override the bundled assets.
    return {
        variant: os.environ.get(f"TRACKIO_LOGO_{variant.upper()}_URL", fallback)
        for variant, fallback in defaults.items()
    }
37
+
38
+
39
def order_metrics_by_plot_preference(metrics: list[str]) -> tuple[list[str], dict]:
    """
    Order metrics based on TRACKIO_PLOT_ORDER environment variable and group them.

    Args:
        metrics: List of metric names to order and group

    Returns:
        Tuple of (ordered_group_names, grouped_metrics_dict)
    """
    plot_order_env = os.environ.get("TRACKIO_PLOT_ORDER", "")
    if not plot_order_env.strip():
        plot_order = []
    else:
        plot_order = [
            item.strip() for item in plot_order_env.split(",") if item.strip()
        ]

    # NOTE: the priority tuples below can contain float("inf") when no order
    # is configured, hence the float annotations.
    def get_metric_priority(metric: str) -> tuple[float, float, str]:
        if not plot_order:
            # No preference configured: every metric ties, so sort by name.
            return (float("inf"), float("inf"), metric)

        # Metrics without a "/" prefix belong to the implicit "charts" group.
        group_prefix = metric.split("/")[0] if "/" in metric else "charts"
        no_match_priority = len(plot_order)

        group_priority = no_match_priority
        for i, pattern in enumerate(plot_order):
            pattern_group = pattern.split("/")[0] if "/" in pattern else "charts"
            if pattern_group == group_prefix:
                group_priority = i
                break

        within_group_priority = no_match_priority
        for i, pattern in enumerate(plot_order):
            if pattern == metric:
                # Exact pattern match wins over any wildcard match.
                within_group_priority = i
                break
            elif pattern.endswith("/*") and within_group_priority == no_match_priority:
                # Wildcard matches rank after all exact matches (offset by
                # len(plot_order)) but keep the pattern's relative order.
                pattern_prefix = pattern[:-2]
                if metric.startswith(pattern_prefix + "/"):
                    within_group_priority = i + len(plot_order)

        return (group_priority, within_group_priority, metric)

    # Build the grouping: top-level prefix -> {direct_metrics, subgroups}.
    result = {}
    for metric in metrics:
        if "/" not in metric:
            if "charts" not in result:
                result["charts"] = {"direct_metrics": [], "subgroups": {}}
            result["charts"]["direct_metrics"].append(metric)
        else:
            parts = metric.split("/")
            main_prefix = parts[0]
            if main_prefix not in result:
                result[main_prefix] = {"direct_metrics": [], "subgroups": {}}
            if len(parts) == 2:
                result[main_prefix]["direct_metrics"].append(metric)
            else:
                # Deeper paths ("a/b/c") are nested one level under "a" -> "b".
                subprefix = parts[1]
                if subprefix not in result[main_prefix]["subgroups"]:
                    result[main_prefix]["subgroups"][subprefix] = []
                result[main_prefix]["subgroups"][subprefix].append(metric)

    for group_data in result.values():
        group_data["direct_metrics"].sort(key=get_metric_priority)
        for subgroup_name in group_data["subgroups"]:
            group_data["subgroups"][subgroup_name].sort(key=get_metric_priority)

    # "charts" may have been created only as a container; drop it when empty.
    if "charts" in result and not result["charts"]["direct_metrics"]:
        del result["charts"]

    def get_group_priority(group_name: str) -> tuple[float, str]:
        if not plot_order:
            return (float("inf"), group_name)

        # A group ranks by the earliest pattern that mentions it.
        min_priority = len(plot_order)
        for i, pattern in enumerate(plot_order):
            pattern_group = pattern.split("/")[0] if "/" in pattern else "charts"
            if pattern_group == group_name:
                min_priority = min(min_priority, i)
        return (min_priority, group_name)

    ordered_groups = sorted(result.keys(), key=get_group_priority)

    return ordered_groups, result
124
+
125
+
126
def persistent_storage_enabled() -> bool:
    """Whether the host exposes persistent storage (e.g. a Spaces volume).

    The environment variable name deliberately keeps the historical typo
    ("PERSISTANT") so existing deployments keep working.
    """
    flag = os.environ.get("PERSISTANT_STORAGE_ENABLED")
    return flag == "true"
130
+
131
+
132
def _get_trackio_dir() -> Path:
    """Resolve the root directory for Trackio data.

    Precedence: Spaces persistent volume, then the TRACKIO_DIR environment
    variable, then a "trackio" folder under the Hugging Face home.
    """
    if persistent_storage_enabled():
        return Path("/data/trackio")
    custom_dir = os.environ.get("TRACKIO_DIR")
    if custom_dir:
        return Path(custom_dir)
    return Path(HF_HOME) / "trackio"
138
+
139
+
140
# Resolved once at import time; all local storage paths derive from these.
TRACKIO_DIR = _get_trackio_dir()
MEDIA_DIR = TRACKIO_DIR / "media"
142
+
143
+
144
def get_or_create_project_hash(project: str) -> str:
    """Return a stable random token for *project*, minting one on first use.

    The token is persisted to ``<TRACKIO_DIR>/<project>.hash`` so subsequent
    calls (and processes) see the same value.
    """
    token_file = TRACKIO_DIR / f"{project}.hash"
    if token_file.exists():
        return token_file.read_text().strip()
    # First access for this project: create and persist a fresh token.
    token = secrets.token_urlsafe(8)
    TRACKIO_DIR.mkdir(parents=True, exist_ok=True)
    token_file.write_text(token)
    return token
152
+
153
+
154
+ def generate_readable_name(used_names: list[str], space_id: str | None = None) -> str:
155
+ """
156
+ Generates a random, readable name like "dainty-sunset-0".
157
+ If space_id is provided, generates username-timestamp format instead.
158
+ """
159
+ if space_id is not None:
160
+ username = _get_default_namespace()
161
+ timestamp = int(time.time())
162
+ return f"{username}-{timestamp}"
163
+ adjectives = [
164
+ "dainty",
165
+ "brave",
166
+ "calm",
167
+ "eager",
168
+ "fancy",
169
+ "gentle",
170
+ "happy",
171
+ "jolly",
172
+ "kind",
173
+ "lively",
174
+ "merry",
175
+ "nice",
176
+ "proud",
177
+ "quick",
178
+ "hugging",
179
+ "silly",
180
+ "tidy",
181
+ "witty",
182
+ "zealous",
183
+ "bright",
184
+ "shy",
185
+ "bold",
186
+ "clever",
187
+ "daring",
188
+ "elegant",
189
+ "faithful",
190
+ "graceful",
191
+ "honest",
192
+ "inventive",
193
+ "jovial",
194
+ "keen",
195
+ "lucky",
196
+ "modest",
197
+ "noble",
198
+ "optimistic",
199
+ "patient",
200
+ "quirky",
201
+ "resourceful",
202
+ "sincere",
203
+ "thoughtful",
204
+ "upbeat",
205
+ "valiant",
206
+ "warm",
207
+ "youthful",
208
+ "zesty",
209
+ "adventurous",
210
+ "breezy",
211
+ "cheerful",
212
+ "delightful",
213
+ "energetic",
214
+ "fearless",
215
+ "glad",
216
+ "hopeful",
217
+ "imaginative",
218
+ "joyful",
219
+ "kindly",
220
+ "luminous",
221
+ "mysterious",
222
+ "neat",
223
+ "outgoing",
224
+ "playful",
225
+ "radiant",
226
+ "spirited",
227
+ "tranquil",
228
+ "unique",
229
+ "vivid",
230
+ "wise",
231
+ "zany",
232
+ "artful",
233
+ "bubbly",
234
+ "charming",
235
+ "dazzling",
236
+ "earnest",
237
+ "festive",
238
+ "gentlemanly",
239
+ "hearty",
240
+ "intrepid",
241
+ "jubilant",
242
+ "knightly",
243
+ "lively",
244
+ "magnetic",
245
+ "nimble",
246
+ "orderly",
247
+ "peaceful",
248
+ "quick-witted",
249
+ "robust",
250
+ "sturdy",
251
+ "trusty",
252
+ "upstanding",
253
+ "vibrant",
254
+ "whimsical",
255
+ ]
256
+ nouns = [
257
+ "sunset",
258
+ "forest",
259
+ "river",
260
+ "mountain",
261
+ "breeze",
262
+ "meadow",
263
+ "ocean",
264
+ "valley",
265
+ "sky",
266
+ "field",
267
+ "cloud",
268
+ "star",
269
+ "rain",
270
+ "leaf",
271
+ "stone",
272
+ "flower",
273
+ "bird",
274
+ "tree",
275
+ "wave",
276
+ "trail",
277
+ "island",
278
+ "desert",
279
+ "hill",
280
+ "lake",
281
+ "pond",
282
+ "grove",
283
+ "canyon",
284
+ "reef",
285
+ "bay",
286
+ "peak",
287
+ "glade",
288
+ "marsh",
289
+ "cliff",
290
+ "dune",
291
+ "spring",
292
+ "brook",
293
+ "cave",
294
+ "plain",
295
+ "ridge",
296
+ "wood",
297
+ "blossom",
298
+ "petal",
299
+ "root",
300
+ "branch",
301
+ "seed",
302
+ "acorn",
303
+ "pine",
304
+ "willow",
305
+ "cedar",
306
+ "elm",
307
+ "falcon",
308
+ "eagle",
309
+ "sparrow",
310
+ "robin",
311
+ "owl",
312
+ "finch",
313
+ "heron",
314
+ "crane",
315
+ "duck",
316
+ "swan",
317
+ "fox",
318
+ "wolf",
319
+ "bear",
320
+ "deer",
321
+ "moose",
322
+ "otter",
323
+ "beaver",
324
+ "lynx",
325
+ "hare",
326
+ "badger",
327
+ "butterfly",
328
+ "bee",
329
+ "ant",
330
+ "beetle",
331
+ "dragonfly",
332
+ "firefly",
333
+ "ladybug",
334
+ "moth",
335
+ "spider",
336
+ "worm",
337
+ "coral",
338
+ "kelp",
339
+ "shell",
340
+ "pebble",
341
+ "face",
342
+ "boulder",
343
+ "cobble",
344
+ "sand",
345
+ "wavelet",
346
+ "tide",
347
+ "current",
348
+ "mist",
349
+ ]
350
+ number = 0
351
+ name = f"{adjectives[0]}-{nouns[0]}-{number}"
352
+ while name in used_names:
353
+ number += 1
354
+ adjective = adjectives[number % len(adjectives)]
355
+ noun = nouns[number % len(nouns)]
356
+ name = f"{adjective}-{noun}-{number}"
357
+ return name
358
+
359
+
360
def is_in_notebook():
    """
    Detect if code is running in a notebook environment (Jupyter, Colab, etc.).
    """
    try:
        from IPython import get_ipython
    except ImportError:
        # IPython not installed: definitely not a notebook.
        return False
    shell = get_ipython()
    if shell is None:
        return False
    shell_name = shell.__class__.__name__
    # ZMQInteractiveShell = Jupyter notebook/lab, Shell = IPython terminal.
    return shell_name in ("ZMQInteractiveShell", "Shell") or "google.colab" in str(
        shell
    )
375
+
376
+
377
def block_main_thread_until_keyboard_interrupt():
    """Sleep-loop forever until Ctrl-C (or an OSError), then log and return."""
    interrupted = False
    while not interrupted:
        try:
            time.sleep(0.1)
        except (KeyboardInterrupt, OSError):
            interrupted = True
    print("Keyboard interruption in main thread... closing dashboard.")
383
+
384
+
385
def simplify_column_names(columns: list[str]) -> dict[str, str]:
    """
    Shorten column names to at most 10 characters (alphanumerics and "/"),
    de-duplicating collisions with numeric suffixes.

    Args:
        columns: Original column names.

    Returns:
        Mapping from each original column name to its simplified, unique name.
    """
    mapping: dict[str, str] = {}
    taken: set[str] = set()

    for original in columns:
        stripped = re.sub(r"[^a-zA-Z0-9/]", "", original)
        # A name with no usable characters falls back to a positional name.
        candidate = stripped[:10] if stripped else f"col_{len(taken)}"

        unique = candidate
        counter = 1
        while unique in taken:
            unique = f"{candidate}_{counter}"
            counter += 1

        taken.add(unique)
        mapping[original] = unique

    return mapping
412
+
413
+
414
+ def print_dashboard_instructions(project: str) -> None:
415
+ """
416
+ Prints instructions for viewing the Trackio dashboard.
417
+
418
+ Args:
419
+ project: The name of the project to show dashboard for.
420
+ """
421
+ ORANGE = "\033[38;5;208m"
422
+ BOLD = "\033[1m"
423
+ RESET = "\033[0m"
424
+
425
+ print("* View dashboard by running in your terminal:")
426
+ print(f'{BOLD}{ORANGE}trackio show --project "{project}"{RESET}')
427
+ print(f'* or by running in Python: trackio.show(project="{project}")')
428
+
429
+
430
+ def preprocess_space_and_dataset_ids(
431
+ space_id: str | None, dataset_id: str | None
432
+ ) -> tuple[str | None, str | None]:
433
+ """
434
+ Preprocesses the Space and Dataset names to ensure they are valid "username/space_id" or "username/dataset_id" format.
435
+ """
436
+ if space_id is not None and "/" not in space_id:
437
+ username = _get_default_namespace()
438
+ space_id = f"{username}/{space_id}"
439
+ if dataset_id is not None and "/" not in dataset_id:
440
+ username = _get_default_namespace()
441
+ dataset_id = f"{username}/{dataset_id}"
442
+ if space_id is not None and dataset_id is None:
443
+ dataset_id = f"{space_id}-dataset"
444
+ return space_id, dataset_id
445
+
446
+
447
def fibo():
    """Yield the Fibonacci backoff sequence: 1, 1, 2, 3, 5, 8, ..."""
    current, following = 1, 1
    while True:
        yield current
        current, following = following, current + following
453
+
454
+
455
def format_timestamp(timestamp_str):
    """
    Render an ISO timestamp as a relative age string, e.g. '3 minutes ago'.

    Returns "Unknown" for missing or unparseable input, and "Just now" for
    anything less than a minute old. Naive timestamps are assumed to be UTC.
    """
    if not timestamp_str or pd.isna(timestamp_str):
        return "Unknown"

    try:
        created = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00"))
        if created.tzinfo is None:
            # Assume UTC so the subtraction against an aware "now" is valid.
            created = created.replace(tzinfo=timezone.utc)

        elapsed = int((datetime.now(timezone.utc) - created).total_seconds())

        if elapsed < 60:
            return "Just now"
        for unit_seconds, unit_name in ((86400, "day"), (3600, "hour"), (60, "minute")):
            if elapsed >= unit_seconds:
                count = elapsed // unit_seconds
                plural = "s" if count != 1 else ""
                return f"{count} {unit_name}{plural} ago"
    except Exception:
        return "Unknown"
482
+
483
+
484
# Default run colors for dashboard plots; overridable via the
# TRACKIO_COLOR_PALETTE environment variable.
DEFAULT_COLOR_PALETTE = [
    "#A8769B",
    "#E89957",
    "#3B82F6",
    "#10B981",
    "#EF4444",
    "#8B5CF6",
    "#14B8A6",
    "#F59E0B",
    "#EC4899",
    "#06B6D4",
]
496
+
497
+
498
def get_color_palette() -> list[str]:
    """
    Return the plot color palette, honoring the TRACKIO_COLOR_PALETTE
    environment variable (a comma-separated list of colors). Falls back to
    DEFAULT_COLOR_PALETTE when the variable is unset or empty.
    """
    override = os.environ.get("TRACKIO_COLOR_PALETTE")
    if not override:
        return DEFAULT_COLOR_PALETTE
    return [part.strip() for part in override.split(",")]
504
+
505
+
506
+ def get_color_mapping(
507
+ runs: list[str], smoothing: bool, color_palette: list[str] | None = None
508
+ ) -> dict[str, str]:
509
+ """Generate color mapping for runs, with transparency for original data when smoothing is enabled."""
510
+ if color_palette is None:
511
+ color_palette = get_color_palette()
512
+
513
+ color_map = {}
514
+
515
+ for i, run in enumerate(runs):
516
+ base_color = color_palette[i % len(color_palette)]
517
+
518
+ if smoothing:
519
+ color_map[run] = base_color + "4D"
520
+ color_map[f"{run}_smoothed"] = base_color
521
+ else:
522
+ color_map[run] = base_color
523
+
524
+ return color_map
525
+
526
+
527
def downsample(
    df: pd.DataFrame,
    x: str,
    y: str,
    color: str | None,
    x_lim: tuple[float | None, float | None] | None = None,
) -> tuple[pd.DataFrame, tuple[float, float] | None]:
    """
    Downsample the dataframe to reduce the number of points plotted.
    Also updates the x-axis limits to the data min/max if either of the x-axis limits are None.

    Downsampling keeps, per x-axis bin, the rows holding the min and max y
    values, so rendered line extremes are preserved. One row just outside
    each x-limit is also kept so lines visually continue past the viewport.

    Args:
        df: The dataframe to downsample.
        x: The column name to use for the x-axis.
        y: The column name to use for the y-axis.
        color: The column name to use for the color.
        x_lim: The x-axis limits to use.

    Returns:
        A tuple containing the downsampled dataframe and the updated x-axis limits.
    """
    if df.empty:
        # Still resolve None limits (to 0) so callers get concrete bounds.
        if x_lim is not None:
            x_lim = (x_lim[0] or 0, x_lim[1] or 0)
        return df, x_lim

    # Work on a trimmed copy so the "bin" helper column never leaks to callers.
    columns_to_keep = [x, y]
    if color is not None and color in df.columns:
        columns_to_keep.append(color)
    df = df[columns_to_keep].copy()

    data_x_min = df[x].min()
    data_x_max = df[x].max()

    # Fill in any missing (None) limit from the actual data range.
    if x_lim is not None:
        x_min, x_max = x_lim
        if x_min is None:
            x_min = data_x_min
        if x_max is None:
            x_max = data_x_max
        updated_x_lim = (x_min, x_max)
    else:
        updated_x_lim = None

    n_bins = 100

    # Downsample each color series independently; without a color column the
    # whole frame is treated as a single group.
    if color is not None and color in df.columns:
        groups = df.groupby(color)
    else:
        groups = [(None, df)]

    downsampled_indices = []

    for _, group_df in groups:
        if group_df.empty:
            continue

        group_df = group_df.sort_values(x)

        if updated_x_lim is not None:
            x_min, x_max = updated_x_lim
            # Keep one point on each side of the viewport so lines don't
            # visibly stop at the plot edge.
            before_point = group_df[group_df[x] < x_min].tail(1)
            after_point = group_df[group_df[x] > x_max].head(1)
            group_df = group_df[(group_df[x] >= x_min) & (group_df[x] <= x_max)]
        else:
            before_point = after_point = None
            x_min = group_df[x].min()
            x_max = group_df[x].max()

        if before_point is not None and not before_point.empty:
            downsampled_indices.extend(before_point.index.tolist())
        if after_point is not None and not after_point.empty:
            downsampled_indices.extend(after_point.index.tolist())

        # Everything in this group may have been outside the viewport.
        if group_df.empty:
            continue

        # Degenerate x-range: binning is impossible, so just keep the y extremes.
        if x_min == x_max:
            min_y_idx = group_df[y].idxmin()
            max_y_idx = group_df[y].idxmax()
            if min_y_idx != max_y_idx:
                downsampled_indices.extend([min_y_idx, max_y_idx])
            else:
                downsampled_indices.append(min_y_idx)
            continue

        # Small groups are cheap to plot in full; skip binning.
        if len(group_df) < 500:
            downsampled_indices.extend(group_df.index.tolist())
            continue

        # Bin the x-range and keep the y-min and y-max row per bin, which
        # preserves spikes that plain stride-based subsampling would drop.
        bins = np.linspace(x_min, x_max, n_bins + 1)
        group_df["bin"] = pd.cut(
            group_df[x], bins=bins, labels=False, include_lowest=True
        )

        for bin_idx in group_df["bin"].dropna().unique():
            bin_data = group_df[group_df["bin"] == bin_idx]
            if bin_data.empty:
                continue

            min_y_idx = bin_data[y].idxmin()
            max_y_idx = bin_data[y].idxmax()

            downsampled_indices.append(min_y_idx)
            if min_y_idx != max_y_idx:
                downsampled_indices.append(max_y_idx)

    # Deduplicate: a row can be both a bin extreme and a boundary point.
    unique_indices = list(set(downsampled_indices))

    downsampled_df = df.loc[unique_indices].copy()

    # Restore x-order within each series (set() destroyed ordering above).
    if color is not None:
        downsampled_df = (
            downsampled_df.groupby(color, sort=False)[downsampled_df.columns]
            .apply(lambda group: group.sort_values(x))
            .reset_index(drop=True)
        )
    else:
        downsampled_df = downsampled_df.sort_values(x).reset_index(drop=True)

    # Drop the helper column if binning happened ("ignore" covers the small-group path).
    downsampled_df = downsampled_df.drop(columns=["bin"], errors="ignore")

    return downsampled_df, updated_x_lim
650
+
651
+
652
def sort_metrics_by_prefix(metrics: list[str]) -> list[str]:
    """
    Flatten metric names into display order: un-prefixed metrics (the
    "charts" group) first, then each prefix group in alphabetical order.

    Args:
        metrics: List of metric names

    Returns:
        List of metric names sorted by prefix

    Example:
        Input: ["train/loss", "loss", "train/acc", "val/loss"]
        Output: ["loss", "train/acc", "train/loss", "val/loss"]
    """
    grouped = group_metrics_by_prefix(metrics)

    ordered: list[str] = []
    ordered.extend(grouped.get("charts", []))
    for prefix in sorted(grouped):
        if prefix == "charts":
            continue
        ordered.extend(grouped[prefix])
    return ordered
678
+
679
+
680
def group_metrics_by_prefix(metrics: list[str]) -> dict[str, list[str]]:
    """
    Bucket metric names by their "/"-prefix; metrics without a prefix go to
    the 'charts' bucket. Each bucket is sorted; 'charts' is inserted first,
    the remaining buckets follow in alphabetical prefix order.

    Args:
        metrics: List of metric names

    Returns:
        Dictionary with prefix names as keys and lists of metrics as values

    Example:
        Input: ["loss", "accuracy", "train/loss", "train/acc", "val/loss"]
        Output: {
            "charts": ["accuracy", "loss"],
            "train": ["train/acc", "train/loss"],
            "val": ["val/loss"]
        }
    """
    plain = sorted(metric for metric in metrics if "/" not in metric)

    prefixed: dict[str, list[str]] = {}
    for metric in metrics:
        if "/" not in metric:
            continue
        prefixed.setdefault(metric.split("/")[0], []).append(metric)

    # Insertion order matters: 'charts' first, then alphabetical prefixes.
    ordered: dict[str, list[str]] = {}
    if plain:
        ordered["charts"] = plain
    for prefix in sorted(prefixed):
        ordered[prefix] = sorted(prefixed[prefix])
    return ordered
727
+
728
+
729
+ def get_sync_status(scheduler: "CommitScheduler | DummyCommitScheduler") -> int | None:
730
+ """Get the sync status from the CommitScheduler in an integer number of minutes, or None if not synced yet."""
731
+ if getattr(
732
+ scheduler, "last_push_time", None
733
+ ): # DummyCommitScheduler doesn't have last_push_time
734
+ time_diff = time.time() - scheduler.last_push_time
735
+ return int(time_diff / 60)
736
+ else:
737
+ return None
738
+
739
+
740
+ def generate_share_url(
741
+ project: str,
742
+ metrics: str,
743
+ selected_runs: list = None,
744
+ hide_headers: bool = False,
745
+ ) -> str:
746
+ """Generate the shareable Space URL based on current settings."""
747
+ space_host = os.environ.get("SPACE_HOST", "")
748
+ if not space_host:
749
+ return ""
750
+
751
+ params: dict[str, str] = {}
752
+
753
+ if project:
754
+ params["project"] = project
755
+
756
+ if metrics and metrics.strip():
757
+ params["metrics"] = metrics
758
+
759
+ if selected_runs:
760
+ params["runs"] = ",".join(selected_runs)
761
+
762
+ if hide_headers:
763
+ params["accordion"] = "hidden"
764
+ params["sidebar"] = "hidden"
765
+ params["navbar"] = "hidden"
766
+
767
+ query_string = urlencode(params)
768
+ return f"https://{space_host}?{query_string}"
769
+
770
+
771
def generate_embed_code(
    project: str,
    metrics: str,
    selected_runs: list | None = None,
    hide_headers: bool = False,
) -> str:
    """Generate the embed iframe code based on current settings.

    Args:
        project: Project name to preselect in the dashboard.
        metrics: Comma-separated metric names to display (blank shows all).
        selected_runs: Optional list of run names to preselect.
        hide_headers: If True, hide the accordion, sidebar, and navbar.

    Returns:
        An ``<iframe>`` HTML snippet, or "" when not running inside a Space.
    """
    embed_url = generate_share_url(project, metrics, selected_runs, hide_headers)
    if not embed_url:
        return ""

    return f'<iframe src="{embed_url}" style="width:1600px; height:500px; border:0;"></iframe>'
783
+
784
+
785
def serialize_values(metrics):
    """
    Make a metrics dict JSON-compliant by replacing non-finite top-level
    float values with the strings "Infinity", "-Infinity", or "NaN".

    Finite Python floats are kept as-is; finite NumPy floats are converted
    to plain Python floats. Non-dict inputs are returned unchanged.

    Example:
        {"loss": float('inf'), "accuracy": 0.95} -> {"loss": "Infinity", "accuracy": 0.95}
    """
    if not isinstance(metrics, dict):
        return metrics

    def _encode(numeric, finite_value):
        # Map non-finite floats to their JSON-safe string spellings.
        if math.isnan(numeric):
            return "NaN"
        if math.isinf(numeric):
            return "-Infinity" if numeric < 0 else "Infinity"
        return finite_value

    serialized = {}
    for name, val in metrics.items():
        if isinstance(val, float):
            serialized[name] = _encode(val, val)
        elif isinstance(val, np.floating):
            as_float = float(val)
            serialized[name] = _encode(as_float, as_float)
        else:
            serialized[name] = val
    return serialized
821
+
822
+
823
def deserialize_values(metrics):
    """
    Inverse of serialize_values: turn the sentinel strings "Infinity",
    "-Infinity", and "NaN" at the top level of a dict back into floats.
    Non-dict inputs are returned unchanged.

    Example:
        {"loss": "Infinity", "accuracy": 0.95} -> {"loss": float('inf'), "accuracy": 0.95}
    """
    if not isinstance(metrics, dict):
        return metrics

    restored = {}
    for name, val in metrics.items():
        if val == "Infinity":
            restored[name] = math.inf
        elif val == "-Infinity":
            restored[name] = -math.inf
        elif val == "NaN":
            restored[name] = math.nan
        else:
            restored[name] = val
    return restored
850
+
851
+
852
+ def get_full_url(
853
+ base_url: str, project: str | None, write_token: str, footer: bool = True
854
+ ) -> str:
855
+ params = []
856
+ if project:
857
+ params.append(f"project={project}")
858
+ params.append(f"write_token={write_token}")
859
+ if not footer:
860
+ params.append("footer=false")
861
+ return base_url + "?" + "&".join(params)
862
+
863
+
864
def embed_url_in_notebook(url: str) -> None:
    """
    Display the given URL as a full-width iframe in the current notebook;
    silently does nothing when IPython is not installed.
    """
    try:
        from IPython.display import HTML, display
    except ImportError:
        return
    iframe_html = (
        f'<div><iframe src="{url}" width="100%" height="1000px" '
        'allow="autoplay; camera; microphone; clipboard-read; clipboard-write;" '
        'frameborder="0" allowfullscreen></iframe></div>'
    )
    display(HTML(iframe_html))
875
+
876
def to_json_safe(obj):
    """
    Recursively convert obj into JSON-serializable primitives.

    NumPy scalars become Python scalars; tuples and sets become lists;
    objects are reduced via their to_dict() method or their public
    __dict__ attributes; anything else falls back to str().
    """
    if isinstance(obj, (str, int, float, bool, type(None))):
        return obj
    if isinstance(obj, np.generic):
        # Unbox a NumPy scalar to the matching Python scalar.
        return obj.item()
    if isinstance(obj, dict):
        return {str(key): to_json_safe(val) for key, val in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return [to_json_safe(item) for item in obj]
    to_dict = getattr(obj, "to_dict", None)
    if callable(to_dict):
        return to_json_safe(to_dict())
    if hasattr(obj, "__dict__"):
        # Only public attributes make it into the serialized form.
        return {
            str(key): to_json_safe(val)
            for key, val in vars(obj).items()
            if not key.startswith("_")
        }
    return str(obj)
894
+
895
+
896
+ def get_space() -> str | None:
897
+ """
898
+ Get the space ID ("user/space") if Trackio is running in a Space, or None if not.
899
+ """
900
+ return os.environ.get("SPACE_ID")
901
+
902
+
903
+ def ordered_subset(items: list[str], subset: list[str] | None) -> list[str]:
904
+ subset_set = set(subset or [])
905
+ return [item for item in items if item in subset_set]
906
+
907
+
908
def _get_default_namespace() -> str:
    """Return the current user's Hub namespace (username).

    Delegates to a cached whoami lookup so repeated calls don't hit the
    /whoami-v2 endpoint again for the same token.
    """
    return _cached_whoami(huggingface_hub.get_token())["name"]
915
+
916
+
917
@lru_cache(maxsize=32)
def _cached_whoami(token: str | None) -> dict:
    # Cache whoami responses per token (bounded to 32 entries) so repeated
    # namespace lookups avoid extra /whoami-v2 API calls.
    return huggingface_hub.whoami(token=token)