Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-01d48ffa-028c-433e-b1bd-8c7222804b151765227306552-2025_12_08-21.55.13.356/source.csv +365 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-036b3473-9f9d-44a0-a906-2567459f706c1754119162409-2025_08_02-09.19.29.910/source.csv +119 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-091406d7-396d-478a-8984-cf3f17325f1e1761067614689-2025_10_21-19.27.02.112/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0a6116e9-6ace-40c6-ab67-84982fd29f7e1760875742620-2025_10_19-14.09.09.963/source.csv +83 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0f6ed796-3fd2-4eb6-ad79-777c3b4353711750804480941-2025_06_25-00.35.32.827/source.csv +215 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-12014243-c6dd-4552-b068-b295ea888a8c1764864769359-2025_12_04-17.12.58.257/source.csv +266 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-13a6759c-b987-4abe-95b3-2a670a8e33c11765778999540-2025_12_15-07.10.10.804/source.csv +28 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1409d307-b9ff-4e0c-ab0e-0cc111e3d75a1755423127885-2025_08_17-11.32.09.747/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-14f0662f-0032-43e8-be9f-8e53d6f150ad1758635869882-2025_09_23-15.57.52.616/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-151b5a28-ad9d-42b1-abde-16bb640375391764847216251-2025_12_04-12.20.23.736/source.csv +345 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-17036a72-bca1-4851-94e3-ba9ef28c8a7c1767618340894-2026_01_05-14.05.55.846/source.csv +124 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-172472a9-c7ba-4eb8-8864-d6b51755816f1755676569546-2025_08_20-09.56.18.547/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-18c1122b-fb8e-4e24-ad01-0c4ee70d5bc01763555122672-2025_11_19-13.25.30.350/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-19886bd2-bbe1-43e2-a1cc-a5022d9247ba1763364044525-2025_11_17-08.20.54.212/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1a3a672d-e65d-446e-bbe2-839587893f7a1765464330546-2025_12_11-15.45.42.895/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1bc1f935-6cba-40dc-8614-b9589f348ebe1756235407246-2025_08_26-21.10.10.19/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1c7c7dc7-1c60-41b6-b978-9aa317222b551758733938891-2025_09_24-19.12.33.158/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1e01b6fa-0da6-410c-a989-4fb4b34be9451766512076737-2025_12_23-18.48.05.304/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1e591805-0455-42eb-ab51-85bbcb59890f1764868794984-2025_12_04-18.20.01.983/source.csv +122 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1f4647f6-404b-450c-8af8-65b66087950a1764326087309-2025_11_28-11.34.59.44/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1fa9b85d-3794-4f3b-b7a0-5170b7d2faaa1762362332596-2025_11_05-18.05.39.648/source.csv +19 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-204cbf10-93bf-4708-b946-a2b6194d891b1758618195751-2025_09_23-11.03.19.580/source.csv +4 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-207fe3e3-7fd2-432d-a410-a7a943195e5f1753557295596-2025_07_26-21.15.03.812/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-22253aff-e784-4c9c-8895-436e45dc7cfd1762854327996-2025_11_11-10.45.34.479/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-23a9afd3-c333-4e29-b2ed-efddc66dd34c1757847239961-2025_09_14-11.54.02.348/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-24737a5e-b7f6-491e-94c6-0c20304cd1e41754227167268-2025_08_03-15.19.34.553/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-29574e2f-1a08-4b03-801c-d0672dd595cd1758207078337-2025_09_18-16.51.34.658/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3078fdf5-e108-4d67-88ef-d05be53da09c1757066213479-2025_09_05-11.57.01.57/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3214e60a-43fc-4481-8dba-af84e93214741765441398363-2025_12_11-09.23.30.463/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-332f83dd-06bd-40a9-81d8-350d79e2bdb41764501843297-2025_11_30-12.24.14.12/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-351cc1d9-c2dd-47fa-95b6-84efafa3a3391765561395177-2025_12_12-18.43.22.49/source.csv +111 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-372ef62d-2075-43ff-ac1a-e2025fd873c41751612450082-2025_07_04-09.01.47.125/source.csv +47 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-37584002-fec7-4d34-873a-4137a623e46d1757936313205-2025_09_15-13.38.38.949/source.csv +154 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-38901971-8972-4ee9-90a3-96f537861f5b1761393531768-2025_10_25-13.59.11.993/source.csv +137 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-38e9e69b-6954-425b-902a-36a03773ab2b1767632090892-2026_01_05-17.54.57.36/source.csv +218 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3e3ce02e-664a-4f58-9d7f-0f56e32c7def1753363875204-2025_07_24-15.31.23.202/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4001b8e0-0e9c-4560-958e-a52f816eab081767861210539-2026_01_08-09.33.45.304/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-400c1369-388a-4663-9475-37c30815fb401752229095376-2025_07_11-12.18.28.454/source.csv +233 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-42033ef5-0153-4063-bb24-8be488cec0e41763382064980-2025_11_17-13.21.21.725/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4475b0d1-da06-4a22-b499-8b87ae83c0be1764503869494-2025_11_30-12.58.11.549/source.csv +2 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-47e5b338-8100-4eb3-a0af-f85f93ebef961765779167850-2025_12_15-07.12.55.913/source.csv +3 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-48d4bcc9-cb57-44d4-8bb2-7caa4daeab621759566467891-2025_10_04-10.27.59.485/source.csv +221 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4a0c523e-3509-46d2-9ab8-f144f364f7ff1755356823323-2025_08_16-17.07.06.759/source.csv +10 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4a17e944-2634-4ccf-bfc0-d106691c39681765284626921-2025_12_09-13.50.38.396/source.csv +2 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5457b093-d76e-4c99-8ea5-85e61f20bc071761477934870-2025_10_26-12.26.46.766/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-585eaec2-345a-44c8-b05a-f53a6d3046971761933036598-2025_10_31-18.50.42.679/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-59e65827-52ef-4a43-bfe9-199bbb450d831767620386754-2026_01_05-14.39.54.856/source.csv +87 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5d37fb4d-73be-43f4-bdda-1c3c7db3bdf31752589529764-2025_07_15-16.25.37.55/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-60545c4b-0ad0-4693-a006-70ea0695f2d01758208774932-2025_09_18-17.19.56.698/source.csv +0 -0
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-01d48ffa-028c-433e-b1bd-8c7222804b151765227306552-2025_12_08-21.55.13.356/source.csv
ADDED
@@ -0,0 +1,365 @@
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,203,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,392,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:55:13 PM [info] Activating crowd-code\n9:55:13 PM [info] Recording started\n9:55:13 PM [info] Initializing git provider using file system watchers...\n9:55:13 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,578,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
4,15673,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",0,0,"#!/usr/bin/env python3\nimport sys\nfrom pathlib import Path\nimport pandas as pd\n\ninput_dir = sys.argv[1]\nfor parquet_file in sorted(Path(input_dir).glob(""shard_*.parquet"")):\n df = pd.read_parquet(parquet_file)\n for text in df['text']:\n print(text)\n",python,tab
|
| 6 |
+
5,18897,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",103,0,"",python,selection_command
|
| 7 |
+
6,28543,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",127,0,"",python,selection_command
|
| 8 |
+
7,28680,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",196,0,"",python,selection_command
|
| 9 |
+
8,28932,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",235,0,"",python,selection_command
|
| 10 |
+
9,28974,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",258,0,"",python,selection_command
|
| 11 |
+
10,29007,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",260,0,"",python,selection_command
|
| 12 |
+
11,29512,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",260,0,"\n",python,content
|
| 13 |
+
12,34603,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,0,"d",python,content
|
| 14 |
+
13,34606,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"",python,selection_keyboard
|
| 15 |
+
14,34696,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"e",python,content
|
| 16 |
+
15,34699,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"",python,selection_keyboard
|
| 17 |
+
16,34904,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"f",python,content
|
| 18 |
+
17,34908,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0,"",python,selection_keyboard
|
| 19 |
+
18,34948,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0," ",python,content
|
| 20 |
+
19,34951,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_keyboard
|
| 21 |
+
20,36640,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0,"",python,selection_command
|
| 22 |
+
21,37380,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_command
|
| 23 |
+
22,38115,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,1,"",python,content
|
| 24 |
+
23,38297,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,1,"",python,content
|
| 25 |
+
24,38474,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,1,"",python,content
|
| 26 |
+
25,38640,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,1,"",python,content
|
| 27 |
+
26,38758,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,0,"d",python,content
|
| 28 |
+
27,38760,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"",python,selection_keyboard
|
| 29 |
+
28,38842,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"e",python,content
|
| 30 |
+
29,38845,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"",python,selection_keyboard
|
| 31 |
+
30,39038,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"f",python,content
|
| 32 |
+
31,39041,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0,"",python,selection_keyboard
|
| 33 |
+
32,39089,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0," ",python,content
|
| 34 |
+
33,39091,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_keyboard
|
| 35 |
+
34,41874,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"h",python,content
|
| 36 |
+
35,41876,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",266,0,"",python,selection_keyboard
|
| 37 |
+
36,41971,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",266,0,"e",python,content
|
| 38 |
+
37,41973,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",267,0,"",python,selection_keyboard
|
| 39 |
+
38,42139,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",267,0,"l",python,content
|
| 40 |
+
39,42141,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",268,0,"",python,selection_keyboard
|
| 41 |
+
40,42257,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",268,0,"l",python,content
|
| 42 |
+
41,42259,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",269,0,"",python,selection_keyboard
|
| 43 |
+
42,42487,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",269,0,"o",python,content
|
| 44 |
+
43,42490,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",270,0,"",python,selection_keyboard
|
| 45 |
+
44,42996,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",270,0,"_",python,content
|
| 46 |
+
45,42998,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",271,0,"",python,selection_keyboard
|
| 47 |
+
46,43055,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",271,0,"w",python,content
|
| 48 |
+
47,43057,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",272,0,"",python,selection_keyboard
|
| 49 |
+
48,43189,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",272,0,"o",python,content
|
| 50 |
+
49,43191,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",273,0,"",python,selection_keyboard
|
| 51 |
+
50,43239,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",273,0,"r",python,content
|
| 52 |
+
51,43241,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",274,0,"",python,selection_keyboard
|
| 53 |
+
52,43491,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",274,0,"l",python,content
|
| 54 |
+
53,43493,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",275,0,"",python,selection_keyboard
|
| 55 |
+
54,43570,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",275,0,"d",python,content
|
| 56 |
+
55,43572,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,0,"",python,selection_keyboard
|
| 57 |
+
56,43840,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,0,"()",python,content
|
| 58 |
+
57,43842,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,0,"",python,selection_keyboard
|
| 59 |
+
58,43931,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,1,")",python,content
|
| 60 |
+
59,43933,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_keyboard
|
| 61 |
+
60,44522,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,":",python,content
|
| 62 |
+
61,44524,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"",python,selection_keyboard
|
| 63 |
+
62,44855,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"\n ",python,content
|
| 64 |
+
63,47409,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_command
|
| 65 |
+
64,47537,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"",python,selection_command
|
| 66 |
+
65,47951,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"p",python,content
|
| 67 |
+
66,47953,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",285,0,"",python,selection_keyboard
|
| 68 |
+
67,48044,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",285,0,"r",python,content
|
| 69 |
+
68,48047,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",286,0,"",python,selection_keyboard
|
| 70 |
+
69,48206,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",286,0,"i",python,content
|
| 71 |
+
70,48209,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",287,0,"",python,selection_keyboard
|
| 72 |
+
71,48387,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",287,0,"n",python,content
|
| 73 |
+
72,48389,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",288,0,"",python,selection_keyboard
|
| 74 |
+
73,48678,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",288,0,"t",python,content
|
| 75 |
+
74,48680,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",289,0,"",python,selection_keyboard
|
| 76 |
+
75,52712,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",288,0,"",python,selection_command
|
| 77 |
+
76,53308,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",289,0,"",python,selection_command
|
| 78 |
+
77,54023,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",289,0,"()",python,content
|
| 79 |
+
78,54025,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",290,0,"",python,selection_keyboard
|
| 80 |
+
79,56395,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,11," print()\n",python,content
|
| 81 |
+
80,59450,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",289,0,"",python,selection_command
|
| 82 |
+
81,60249,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",290,0,"",python,selection_command
|
| 83 |
+
82,61150,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",290,0,"?",python,content
|
| 84 |
+
83,61153,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",291,0,"",python,selection_keyboard
|
| 85 |
+
84,61938,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",290,1,"",python,content
|
| 86 |
+
85,62199,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",290,0,"""""",python,content
|
| 87 |
+
86,62201,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",291,0,"",python,selection_keyboard
|
| 88 |
+
87,67089,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",291,0,"<",python,content
|
| 89 |
+
88,67091,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,0,"",python,selection_keyboard
|
| 90 |
+
89,67257,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,0,">",python,content
|
| 91 |
+
90,67259,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",293,0,"",python,selection_keyboard
|
| 92 |
+
91,68139,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,0,"",python,selection_command
|
| 93 |
+
92,69207,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,0,"r",python,content
|
| 94 |
+
93,69209,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",293,0,"",python,selection_keyboard
|
| 95 |
+
94,69320,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",293,0,"e",python,content
|
| 96 |
+
95,69322,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",294,0,"",python,selection_keyboard
|
| 97 |
+
96,69554,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",294,0,"p",python,content
|
| 98 |
+
97,69556,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",295,0,"",python,selection_keyboard
|
| 99 |
+
98,69855,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",295,0,"l",python,content
|
| 100 |
+
99,69857,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",296,0,"",python,selection_keyboard
|
| 101 |
+
100,69937,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",296,0,"a",python,content
|
| 102 |
+
101,69940,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,0,"",python,selection_keyboard
|
| 103 |
+
102,70331,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,0,"c",python,content
|
| 104 |
+
103,70333,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",298,0,"",python,selection_keyboard
|
| 105 |
+
104,70565,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",298,0,"e",python,content
|
| 106 |
+
105,70567,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",299,0,"",python,selection_keyboard
|
| 107 |
+
106,70799,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",299,0," ",python,content
|
| 108 |
+
107,70801,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",300,0,"",python,selection_keyboard
|
| 109 |
+
108,70842,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",300,0,"t",python,content
|
| 110 |
+
109,70845,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",301,0,"",python,selection_keyboard
|
| 111 |
+
110,71012,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",301,0,"h",python,content
|
| 112 |
+
111,71015,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",302,0,"",python,selection_keyboard
|
| 113 |
+
112,71027,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",302,0,"i",python,content
|
| 114 |
+
113,71028,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",303,0,"",python,selection_keyboard
|
| 115 |
+
114,71221,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",303,0,"s",python,content
|
| 116 |
+
115,71223,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",304,0,"",python,selection_keyboard
|
| 117 |
+
116,71296,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",304,0," ",python,content
|
| 118 |
+
117,71298,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",305,0,"",python,selection_keyboard
|
| 119 |
+
118,71475,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",305,0,"w",python,content
|
| 120 |
+
119,71477,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",306,0,"",python,selection_keyboard
|
| 121 |
+
120,71556,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",306,0,"i",python,content
|
| 122 |
+
121,71558,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",307,0,"",python,selection_keyboard
|
| 123 |
+
122,71687,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",307,0,"t",python,content
|
| 124 |
+
123,71689,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",308,0,"",python,selection_keyboard
|
| 125 |
+
124,71765,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",308,0,"h",python,content
|
| 126 |
+
125,71767,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",309,0,"",python,selection_keyboard
|
| 127 |
+
126,71959,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",309,0," ",python,content
|
| 128 |
+
127,71961,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",310,0,"",python,selection_keyboard
|
| 129 |
+
128,72199,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",310,0,"h",python,content
|
| 130 |
+
129,72201,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",311,0,"",python,selection_keyboard
|
| 131 |
+
130,73162,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",311,0,"e",python,content
|
| 132 |
+
131,73164,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",312,0,"",python,selection_keyboard
|
| 133 |
+
132,73386,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",312,0,"l",python,content
|
| 134 |
+
133,73388,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",313,0,"",python,selection_keyboard
|
| 135 |
+
134,73538,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",313,0,"l",python,content
|
| 136 |
+
135,73540,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",314,0,"",python,selection_keyboard
|
| 137 |
+
136,73635,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",314,0,"o",python,content
|
| 138 |
+
137,73637,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",315,0,"",python,selection_keyboard
|
| 139 |
+
138,73894,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",315,0," ",python,content
|
| 140 |
+
139,73896,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",316,0,"",python,selection_keyboard
|
| 141 |
+
140,73972,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",316,0,"w",python,content
|
| 142 |
+
141,73974,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",317,0,"",python,selection_keyboard
|
| 143 |
+
142,74065,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",317,0,"o",python,content
|
| 144 |
+
143,74068,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",318,0,"",python,selection_keyboard
|
| 145 |
+
144,74138,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",318,0,"r",python,content
|
| 146 |
+
145,74141,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",319,0,"",python,selection_keyboard
|
| 147 |
+
146,74328,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",319,0,"l",python,content
|
| 148 |
+
147,74479,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",320,0,"",python,selection_keyboard
|
| 149 |
+
148,74622,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",320,0,"d",python,content
|
| 150 |
+
149,74624,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",321,0,"",python,selection_keyboard
|
| 151 |
+
150,94038,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",320,0,"",python,selection_command
|
| 152 |
+
151,94413,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_command
|
| 153 |
+
152,94479,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",320,0,"",python,selection_command
|
| 154 |
+
153,98914,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,45," print(""<replace this with hello world>"")\n",python,content
|
| 155 |
+
154,99979,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_command
|
| 156 |
+
155,100082,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",320,0,"",python,selection_command
|
| 157 |
+
156,105570,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",321,0,"",python,selection_command
|
| 158 |
+
157,105806,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",321,1,">",python,selection_command
|
| 159 |
+
158,106023,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",316,6,"world>",python,selection_command
|
| 160 |
+
159,106164,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",310,12,"hello world>",python,selection_command
|
| 161 |
+
160,106347,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",305,17,"with hello world>",python,selection_command
|
| 162 |
+
161,106500,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",300,22,"this with hello world>",python,selection_command
|
| 163 |
+
162,106694,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,30,"replace this with hello world>",python,selection_command
|
| 164 |
+
163,106862,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",289,33,"(""<replace this with hello world>",python,selection_command
|
| 165 |
+
164,107302,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",290,32,"""<replace this with hello world>",python,selection_command
|
| 166 |
+
165,107479,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",291,31,"<replace this with hello world>",python,selection_command
|
| 167 |
+
166,111781,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",291,31,"",python,content
|
| 168 |
+
167,115116,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",291,0,"h",python,content
|
| 169 |
+
168,115118,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,0,"",python,selection_keyboard
|
| 170 |
+
169,118956,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,15," print(""hello"")\n",python,content
|
| 171 |
+
170,121418,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",293,0,"",python,selection_command
|
| 172 |
+
171,121552,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",294,0,"",python,selection_command
|
| 173 |
+
172,121711,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",295,0,"",python,selection_command
|
| 174 |
+
173,121869,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",296,0,"",python,selection_command
|
| 175 |
+
174,122172,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",296,0," ",python,content
|
| 176 |
+
175,122174,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,0,"",python,selection_keyboard
|
| 177 |
+
176,127845,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,0,"w",python,content
|
| 178 |
+
177,127997,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",298,0,"",python,selection_keyboard
|
| 179 |
+
178,131226,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",298,0,"o",python,content
|
| 180 |
+
179,131229,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",299,0,"",python,selection_keyboard
|
| 181 |
+
180,132795,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",299,0,"r",python,content
|
| 182 |
+
181,132797,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",300,0,"",python,selection_keyboard
|
| 183 |
+
182,140824,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",300,0,"l",python,content
|
| 184 |
+
183,140826,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",301,0,"",python,selection_keyboard
|
| 185 |
+
184,143487,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",301,0,"d",python,content
|
| 186 |
+
185,143489,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",302,0,"",python,selection_keyboard
|
| 187 |
+
186,145314,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",302,0,"!",python,content
|
| 188 |
+
187,145316,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",303,0,"",python,selection_keyboard
|
| 189 |
+
188,147538,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",302,0,"",python,selection_command
|
| 190 |
+
189,147942,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_command
|
| 191 |
+
190,148302,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,18,"def hello_world():",python,selection_command
|
| 192 |
+
191,148620,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,44,"def hello_world():\n print(""hello world!"")",python,selection_command
|
| 193 |
+
192,148676,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,44,"",python,content
|
| 194 |
+
193,149373,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,0,"v",python,content
|
| 195 |
+
194,149375,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"",python,selection_keyboard
|
| 196 |
+
195,149876,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,1,"",python,content
|
| 197 |
+
196,150051,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,0,"d",python,content
|
| 198 |
+
197,150053,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"",python,selection_keyboard
|
| 199 |
+
198,150140,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"e",python,content
|
| 200 |
+
199,150142,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"",python,selection_keyboard
|
| 201 |
+
200,150453,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"f",python,content
|
| 202 |
+
201,150455,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0,"",python,selection_keyboard
|
| 203 |
+
202,150459,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0," ",python,content
|
| 204 |
+
203,150460,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_keyboard
|
| 205 |
+
204,153072,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,1,"",python,content
|
| 206 |
+
205,153869,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0," ",python,content
|
| 207 |
+
206,153871,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_keyboard
|
| 208 |
+
207,153916,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"m",python,content
|
| 209 |
+
208,153918,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",266,0,"",python,selection_keyboard
|
| 210 |
+
209,154182,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",266,0,"a",python,content
|
| 211 |
+
210,154185,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",267,0,"",python,selection_keyboard
|
| 212 |
+
211,154203,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",267,0,"i",python,content
|
| 213 |
+
212,154205,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",268,0,"",python,selection_keyboard
|
| 214 |
+
213,154302,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",268,0,"n",python,content
|
| 215 |
+
214,154305,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",269,0,"",python,selection_keyboard
|
| 216 |
+
215,156438,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,4,"",python,content
|
| 217 |
+
216,156596,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,4,"",python,content
|
| 218 |
+
217,156770,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,0,"i",python,content
|
| 219 |
+
218,156772,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"",python,selection_keyboard
|
| 220 |
+
219,156969,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",262,0,"f",python,content
|
| 221 |
+
220,156972,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0,"",python,selection_keyboard
|
| 222 |
+
221,157021,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",263,0," ",python,content
|
| 223 |
+
222,157024,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0,"",python,selection_keyboard
|
| 224 |
+
223,159433,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",264,0,"_",python,content
|
| 225 |
+
224,159435,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"",python,selection_keyboard
|
| 226 |
+
225,159606,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",265,0,"_",python,content
|
| 227 |
+
226,159608,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",266,0,"",python,selection_keyboard
|
| 228 |
+
227,163040,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",266,0,"n",python,content
|
| 229 |
+
228,163043,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",267,0,"",python,selection_keyboard
|
| 230 |
+
229,163206,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",267,0,"a",python,content
|
| 231 |
+
230,163208,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",268,0,"",python,selection_keyboard
|
| 232 |
+
231,163240,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",268,0,"m",python,content
|
| 233 |
+
232,163242,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",269,0,"",python,selection_keyboard
|
| 234 |
+
233,163336,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",269,0,"e",python,content
|
| 235 |
+
234,163338,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",270,0,"",python,selection_keyboard
|
| 236 |
+
235,164124,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",270,0,"_",python,content
|
| 237 |
+
236,164126,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",271,0,"",python,selection_keyboard
|
| 238 |
+
237,164307,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",271,0,"_",python,content
|
| 239 |
+
238,164309,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",272,0,"",python,selection_keyboard
|
| 240 |
+
239,166107,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",272,0," ",python,content
|
| 241 |
+
240,166109,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",273,0,"",python,selection_keyboard
|
| 242 |
+
241,167329,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",273,0,"=",python,content
|
| 243 |
+
242,167331,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",274,0,"",python,selection_keyboard
|
| 244 |
+
243,167504,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",274,0,"=",python,content
|
| 245 |
+
244,167506,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",275,0,"",python,selection_keyboard
|
| 246 |
+
245,168848,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",275,0," ",python,content
|
| 247 |
+
246,168851,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,0,"",python,selection_keyboard
|
| 248 |
+
247,175498,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,0,"m",python,content
|
| 249 |
+
248,175499,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,0,"",python,selection_keyboard
|
| 250 |
+
249,179109,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,0,"a",python,content
|
| 251 |
+
250,179111,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_keyboard
|
| 252 |
+
251,179135,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"i",python,content
|
| 253 |
+
252,179138,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"",python,selection_keyboard
|
| 254 |
+
253,179218,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"n",python,content
|
| 255 |
+
254,179220,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,0,"",python,selection_keyboard
|
| 256 |
+
255,183073,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",281,0,"if __name__ == main\n",python,content
|
| 257 |
+
256,184618,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"",python,selection_command
|
| 258 |
+
257,185264,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",299,0,"",python,selection_command
|
| 259 |
+
258,185830,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",281,20,"",python,content
|
| 260 |
+
259,186391,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",261,0,"",python,selection_command
|
| 261 |
+
260,186755,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,0,"",python,selection_command
|
| 262 |
+
261,187754,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,0,":",python,content
|
| 263 |
+
262,187756,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",281,0,"",python,selection_keyboard
|
| 264 |
+
263,192672,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,0,"",python,selection_command
|
| 265 |
+
264,200100,"/home/franz.srambical/crowd-pilot/crowd_pilot/insert_missing_csv_newlines.py",0,0,"#!/usr/bin/env python3\nfrom __future__ import annotations\n\nimport argparse\nimport io\nimport concurrent.futures\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, Iterator, List, Optional, Pattern, Sequence, Tuple\n\n\nDEFAULT_TIMESTAMP_PATTERNS: Sequence[str] = (\n # Numeric timestamp format: e.g., 2218,3257761 (two integer groups separated by a comma)\n # Word-boundary-like guards to avoid partial matches inside larger numbers\n r""(?<!\d)\d+,\d+(?!\d)"",\n)\n\n\ndef compile_timestamp_regexes(patterns: Sequence[str]) -> List[Pattern[str]]:\n return [re.compile(p) for p in patterns]\n\n\ndef find_timestamp_spans(text: str, regexes: Sequence[Pattern[str]]) -> List[Tuple[int, int]]:\n spans: List[Tuple[int, int]] = []\n for rx in regexes:\n for m in rx.finditer(text):\n spans.append((m.start(), m.end()))\n spans.sort(key=lambda s: s[0])\n # Merge overlapping/adjacent spans coming from different regexes\n merged: List[Tuple[int, int]] = []\n for s in spans:\n if not merged or s[0] > merged[-1][1]:\n merged.append(list(s)) # type: ignore[list-item]\n else:\n prev_start, prev_end = merged[-1]\n merged[-1] = (prev_start, max(prev_end, s[1]))\n return [(int(a), int(b)) for a, b in merged]\n\n\ndef _is_inside_quotes(line: str, idx: int) -> bool:\n """"""Return True if the character position idx is inside a CSV quoted field.\n\n CSV quoting uses double quotes ("") and doubles them ("""") to escape.\n We scan from start to idx (exclusive) and toggle quote-state, skipping escaped quotes.\n """"""\n in_quotes = False\n i = 0\n # We only need quote state up to the index where a match begins\n while i < idx and i < len(line):\n ch = line[i]\n if ch == '""':\n # Escaped quote inside a quoted field: """"\n if in_quotes and i + 1 < idx and line[i + 1] == '""':\n i += 2\n continue\n in_quotes = not in_quotes\n i += 1\n return in_quotes\n\n\ndef find_row_start_indices(line: str, timestamp_regexes: List[Pattern[str]]) -> List[int]:\n """"""Find indices where a new CSV row likely starts within a (possibly merged) line.\n\n Heuristic:\n - A row start looks like digits,digits (first two numeric columns)\n - It must be OUTSIDE quoted fields\n - It must be either at the start of the line, or NOT immediately preceded by a comma\n (to avoid matching numeric pairs that are simply subsequent columns like 0,0)\n - It should be immediately followed by a comma (end of second numeric column)\n """"""\n indices: List[int] = []\n for rx in timestamp_regexes:\n for m in rx.finditer(line):\n s, e = m.start(), m.end()\n if _is_inside_quotes(line, s):\n continue\n prev = s - 1\n # Must be start-of-line or not immediately after a comma\n if prev >= 0 and line[prev] == ',':\n continue\n # Should be followed by a comma (after the second number ends)\n if e < len(line) and line[e] != ',':\n continue\n indices.append(s)\n\n # Sort and unique\n indices = sorted(set(indices))\n return indices\n\n\ndef needs_split(line: str, timestamp_regexes: List[Pattern[str]]) -> bool:\n starts = find_row_start_indices(line, timestamp_regexes)\n if len(starts) >= 2:\n return True\n if len(starts) == 1:\n # Split when a header-like prefix precedes the first timestamp\n prefix = line[: starts[0]]\n if prefix.strip("" ,;|\t\r\n"") != """":\n return True\n return False\n\n\ndef split_line_on_timestamps(line: str, timestamp_regexes: List[Pattern[str]], 
max_splits_per_line: int) -> List[str]:\n """"""\n Split a line into multiple lines when multiple timestamp tokens are present.\n\n Strategy:\n - Detect all timestamp spans (merged across patterns).\n - If multiple spans exist, start a new CSV row at each timestamp except the first.\n - Keep delimiters and content from each start to right before the next timestamp.\n - Trim leading whitespace/separators between chunks.\n """"""\n starts = find_row_start_indices(line, timestamp_regexes)\n if len(starts) == 0:\n return [line]\n\n # Build chunks: [0:first_start) is kept with first chunk if it's not just separators\n chunks: List[str] = []\n # Pre-chunk content\n prefix = line[: starts[0]]\n # If prefix has non-separator characters, keep it attached to the first chunk.\n # Otherwise, drop it.\n def is_only_separators(s: str) -> bool:\n return s.strip("" ,;|\t\r\n"") == """"\n\n effective_start = 0 if not is_only_separators(prefix) else starts[0]\n\n indices: List[int] = [effective_start] + starts\n # Ensure uniqueness and ascending\n indices = sorted(set(indices))\n\n for i, idx in enumerate(indices):\n next_idx = indices[i + 1] if i + 1 < len(indices) else len(line)\n segment = line[idx:next_idx]\n # Clean up leading separators carried over when we started mid-line\n segment = segment.lstrip("" \t,;|\r"")\n # Also strip trailing newline characters; we'll re-add newline at write time\n segment = segment.rstrip(""\r\n"")\n if segment:\n chunks.append(segment)\n\n if len(chunks) >= max_splits_per_line:\n raise ValueError(f""Suspiciously many splits in line: {line}"")\n\n return chunks if chunks else [line]\n\n\ndef iter_csv_files(root: Path) -> Iterator[Path]:\n for base, _dirs, files in os.walk(root):\n for name in files:\n if name.lower().endswith("".csv""):\n yield Path(base) / name\n\n\ndef atomic_write_text(target: Path, content: str) -> None:\n tmp_dir = target.parent\n with tempfile.NamedTemporaryFile(""w"", delete=False, dir=tmp_dir) as tf:\n tmp_path = Path(tf.name)\n tf.write(content)\n try:\n os.replace(tmp_path, target)\n except Exception:\n tmp_path.unlink(missing_ok=True)\n raise\n\n\ndef process_file(path: Path, timestamp_regexes: List[Pattern[str]], max_splits_per_line: int, dry_run: bool = False) -> Tuple[bool, int]:\n changed = False\n changes_count = 0\n with path.open(""r"", encoding=""utf-8"", errors=""replace"", newline="""") as f:\n original_lines = f.readlines()\n\n output_lines: List[str] = []\n for line in original_lines:\n if needs_split(line, timestamp_regexes):\n parts = split_line_on_timestamps(line, timestamp_regexes, max_splits_per_line)\n if len(parts) > 1:\n changed = True\n changes_count += len(parts) - 1\n for p in parts:\n output_lines.append(p + ""\n"")\n else:\n output_lines.append(line)\n\n if changed and not dry_run:\n atomic_write_text(path, """".join(output_lines))\n\n return changed, changes_count\n\n\ndef parse_args(argv: Optional[Sequence[str]] = None) -> argparse.Namespace:\n p = argparse.ArgumentParser(description=""Insert missing CSV newlines based on timestamp heuristics."")\n p.add_argument(""root"", type=str, help=""Root directory to scan recursively for .csv files"")\n p.add_argument(""--pattern"", ""-p"", action=""append"", default=list(DEFAULT_TIMESTAMP_PATTERNS),\n help=""Regex for timestamps (can be repeated). 
Default: numeric 'digits,digits'."")\n p.add_argument(""--dry-run"", action=""store_true"", help=""Do not modify files, just report changes"")\n p.add_argument(""--max-splits"", type=int, default=5, help=""Safety: maximum chunks per merged line"")\n p.add_argument(""--jobs"", ""-j"", type=int, default=os.cpu_count() or 1, help=""Number of parallel jobs"")\n return p.parse_args(argv)\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n args = parse_args(argv)\n root = Path(args.root)\n if not root.exists() or not root.is_dir():\n print(f""Root directory not found: {root}"", file=sys.stderr)\n return 2\n\n timestamp_regexes = compile_timestamp_regexes(args.pattern)\n\n all_files = list(iter_csv_files(root))\n total_files = len(all_files)\n modified_files = 0\n total_inserts = 0\n\n with concurrent.futures.ProcessPoolExecutor(max_workers=args.jobs) as executor:\n future_to_path = {\n executor.submit(process_file, p, timestamp_regexes, args.max_splits, bool(args.dry_run)): p\n for p in all_files\n }\n\n for future in concurrent.futures.as_completed(future_to_path):\n path = future_to_path[future]\n try:\n changed, count = future.result()\n if changed:\n modified_files += 1\n total_inserts += count\n action = ""WOULD FIX"" if args.dry_run else ""FIXED""\n print(f""{action}: {path} (+{count} newline(s))"")\n except Exception as exc:\n print(f""Error processing {path}: {exc}"", file=sys.stderr)\n\n print(f""Scanned {total_files} CSV file(s). Modified {modified_files}. Inserted {total_inserts} newline(s)."")\n return 0\n\n\nif __name__ == ""__main__"":\n raise SystemExit(main())\n\n\n",python,tab
|
| 266 |
+
265,202224,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",0,0,"",python,tab
|
| 267 |
+
266,202904,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,0,"",python,selection_command
|
| 268 |
+
267,204737,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,4,"",python,content
|
| 269 |
+
268,205105,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",276,0,"""""",python,content
|
| 270 |
+
269,205107,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,0,"",python,selection_keyboard
|
| 271 |
+
270,205296,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,1,"""",python,content
|
| 272 |
+
271,205298,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_keyboard
|
| 273 |
+
272,205944,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,0,"",python,selection_command
|
| 274 |
+
273,208578,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",277,0,"_",python,content
|
| 275 |
+
274,208580,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"",python,selection_keyboard
|
| 276 |
+
275,208891,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",278,0,"_",python,content
|
| 277 |
+
276,208893,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"",python,selection_keyboard
|
| 278 |
+
277,209534,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",279,0,"m",python,content
|
| 279 |
+
278,209537,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,0,"",python,selection_keyboard
|
| 280 |
+
279,209693,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",280,0,"a",python,content
|
| 281 |
+
280,209696,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",281,0,"",python,selection_keyboard
|
| 282 |
+
281,209858,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",281,0,"i",python,content
|
| 283 |
+
282,209860,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",282,0,"",python,selection_keyboard
|
| 284 |
+
283,209866,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",282,0,"n",python,content
|
| 285 |
+
284,209868,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",283,0,"",python,selection_keyboard
|
| 286 |
+
285,210402,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",283,0,"_",python,content
|
| 287 |
+
286,210404,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"",python,selection_keyboard
|
| 288 |
+
287,210520,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"_",python,content
|
| 289 |
+
288,210522,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",285,0,"",python,selection_keyboard
|
| 290 |
+
289,214069,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"",python,selection_command
|
| 291 |
+
290,214155,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",283,0,"",python,selection_command
|
| 292 |
+
291,214434,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"",python,selection_command
|
| 293 |
+
292,214581,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",285,0,"",python,selection_command
|
| 294 |
+
293,216847,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",284,0,"",python,selection_command
|
| 295 |
+
294,217461,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",288,0,"",python,selection_command
|
| 296 |
+
295,218063,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",287,1,"",python,content
|
| 297 |
+
296,218330,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",287,0,"\n ",python,content
|
| 298 |
+
297,223856,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",292,0,"m",python,content
|
| 299 |
+
298,223859,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",293,0,"",python,selection_keyboard
|
| 300 |
+
299,224054,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",293,0,"a",python,content
|
| 301 |
+
300,224057,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",294,0,"",python,selection_keyboard
|
| 302 |
+
301,224088,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",294,0,"i",python,content
|
| 303 |
+
302,224090,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",295,0,"",python,selection_keyboard
|
| 304 |
+
303,224153,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",295,0,"n",python,content
|
| 305 |
+
304,224155,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",296,0,"",python,selection_keyboard
|
| 306 |
+
305,224840,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",296,0,"()",python,content
|
| 307 |
+
306,224843,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,0,"",python,selection_keyboard
|
| 308 |
+
307,224922,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,1,")",python,content
|
| 309 |
+
308,224925,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",298,0,"",python,selection_keyboard
|
| 310 |
+
309,230645,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",297,0,"",python,selection_command
|
| 311 |
+
310,231191,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",270,0,"",python,selection_command
|
| 312 |
+
311,231275,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",260,0,"",python,selection_command
|
| 313 |
+
312,231523,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",249,0,"",python,selection_command
|
| 314 |
+
313,231685,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",221,0,"",python,selection_command
|
| 315 |
+
314,231838,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",182,0,"",python,selection_command
|
| 316 |
+
315,231984,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",113,0,"",python,selection_command
|
| 317 |
+
316,232353,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",89,0,"",python,selection_command
|
| 318 |
+
317,242712,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",79,0,"",python,selection_command
|
| 319 |
+
318,243034,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",79,0,"\n",python,content
|
| 320 |
+
319,244364,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",80,0,"d",python,content
|
| 321 |
+
320,244366,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",81,0,"",python,selection_keyboard
|
| 322 |
+
321,244569,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",81,0,"e",python,content
|
| 323 |
+
322,244571,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",82,0,"",python,selection_keyboard
|
| 324 |
+
323,244888,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",82,0,"f",python,content
|
| 325 |
+
324,244890,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",83,0,"",python,selection_keyboard
|
| 326 |
+
325,251806,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",83,0," ",python,content
|
| 327 |
+
326,251809,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",84,0,"",python,selection_keyboard
|
| 328 |
+
327,252437,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",84,0,"m",python,content
|
| 329 |
+
328,252439,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",85,0,"",python,selection_keyboard
|
| 330 |
+
329,252718,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",85,0,"a",python,content
|
| 331 |
+
330,252721,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",86,0,"",python,selection_keyboard
|
| 332 |
+
331,252803,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",86,0,"a",python,content
|
| 333 |
+
332,252805,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",87,0,"",python,selection_keyboard
|
| 334 |
+
333,253552,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",86,1,"",python,content
|
| 335 |
+
334,253654,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",86,0,"i",python,content
|
| 336 |
+
335,253656,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",87,0,"",python,selection_keyboard
|
| 337 |
+
336,253926,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",87,0,"n",python,content
|
| 338 |
+
337,253928,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",88,0,"",python,selection_keyboard
|
| 339 |
+
338,254389,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",88,0,"()",python,content
|
| 340 |
+
339,254391,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",89,0,"",python,selection_keyboard
|
| 341 |
+
340,254455,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",89,1,")",python,content
|
| 342 |
+
341,254456,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",90,0,"",python,selection_keyboard
|
| 343 |
+
342,257542,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",90,0,":",python,content
|
| 344 |
+
343,257543,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",91,0,"",python,selection_keyboard
|
| 345 |
+
344,262775,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",103,0,"",python,selection_command
|
| 346 |
+
345,265322,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",102,0,"",python,selection_command
|
| 347 |
+
346,265721,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",92,0,"",python,selection_command
|
| 348 |
+
347,269128,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",115,0,"",python,selection_command
|
| 349 |
+
348,269968,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",92,0,"",python,selection_command
|
| 350 |
+
349,270287,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",116,0,"",python,selection_command
|
| 351 |
+
350,270404,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",185,0,"",python,selection_command
|
| 352 |
+
351,271979,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",224,0,"",python,selection_command
|
| 353 |
+
352,272144,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",252,0,"",python,selection_command
|
| 354 |
+
353,274532,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",252,1," ",python,selection_command
|
| 355 |
+
354,274878,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",224,1," ",python,selection_command
|
| 356 |
+
355,275017,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",185,1," ",python,selection_command
|
| 357 |
+
356,275163,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",116,1,"f",python,selection_command
|
| 358 |
+
357,275325,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",92,1,"i",python,selection_command
|
| 359 |
+
358,275667,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",252,0," ",python,content
|
| 360 |
+
359,275668,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",224,0," ",python,content
|
| 361 |
+
360,275668,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",185,0," ",python,content
|
| 362 |
+
361,275668,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",116,0," ",python,content
|
| 363 |
+
362,275668,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",92,0," ",python,content
|
| 364 |
+
363,275676,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",92,0,"",python,selection_command
|
| 365 |
+
364,279077,"/home/franz.srambical/crowd-pilot/crowd_pilot/read_dataset.py",80,0,"",python,selection_command
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-036b3473-9f9d-44a0-a906-2567459f706c1754119162409-2025_08_02-09.19.29.910/source.csv
ADDED
|
@@ -0,0 +1,119 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"experiments/sample.sh",0,0,"source .venv/bin/activate\n\ndata_dir=""$PWD/data_arrayrecord/dummy""\nckpt_dir=""$PWD/checkpoints/causal_dynamics_openai_grain_tok_restore""\n\nexport PYTHONUNBUFFERED=1\nsrun ipython --pdb sample.py -- \\n --dyna_type ""causal"" \\n --batch_size 1 \\n --seq_len 3 \\n --start_frame 1 \\n --checkpoint $ckpt_dir \\n --data_dir $data_dir",shellscript,tab
|
| 3 |
+
2,310,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:19:29 AM [info] Activating crowd-code\n9:19:29 AM [info] Recording started\n9:19:29 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,337,"experiments/sample.sh",0,0,"",shellscript,tab
|
| 5 |
+
4,3606,"genie.py",0,0,"from typing import Dict\nimport time\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsCausal\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_type: str,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_type = dyna_type\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n if self.dyna_type == ""maskgit"":\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n elif self.dyna_type == ""causal"":\n self.dynamics = DynamicsCausal(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n 
num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {self.dyna_type}"")\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> Dict[str, jax.Array]:\n videos_BTHWC = batch[""videos""]\n tokenizer_outputs = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_indices_BTN = tokenizer_outputs[""indices""]\n lam_outputs = self.lam.vq_encode(videos_BTHWC, training=False)\n z_q_BTm11L = lam_outputs[""z_q""]\n action_indices_E = lam_outputs[""indices""]\n latent_actions_BTm11L = jax.lax.cond(\n self.lam_co_train,\n lambda: z_q_BTm11L,\n lambda: jax.lax.stop_gradient(z_q_BTm11L),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(token_indices_BTN),\n latent_actions=latent_actions_BTm11L,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_logits_BTNV, dyna_mask = self.dynamics(outputs, training)\n outputs[""token_logits""] = dyna_logits_BTNV\n if dyna_mask is not None:\n outputs[""mask""] = dyna_mask\n mle_indices_BTN = jnp.argmax(outputs[""token_logits""], axis=-1)\n H, W = batch[""videos""].shape[2:4]\n outputs[""recon""] = self.tokenizer.decode(mle_indices_BTN, (H, W))\n outputs[""lam_indices""] = action_indices_E\n return outputs\n\n # FIXME (f.srambical): sampling should be moved to the dynamics classes\n def sample(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n\n def maskgit_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, mask_BSN, action_tokens_EL = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = self.dynamics.patch_embed(token_idxs_BSN)\n mask_token_111M = self.dynamics.mask_token.value\n mask_expanded_BSN1 = 
mask_BSN[..., None]\n vid_embed_BSNM = jnp.where(mask_expanded_BSN1, mask_token_111M, vid_embed_BSNM)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = self.dynamics.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNM += act_embed_BS1M\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits_BSNV = self.dynamics.transformer(vid_embed_BSNM) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_BSN = jnp.argmax(final_logits_BSNV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_BSN = jax.random.categorical(_rng, final_logits_BSNV)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs_BSN = gather_fn(\n jax.nn.softmax(final_logits_BSNV), sampled_token_idxs_BSN\n )\n final_token_probs_BSN += ~mask_BSN\n # Update masked tokens only\n token_idxs_BSN = jnp.where(mask_BSN, sampled_token_idxs_BSN, token_idxs_BSN)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask_N = jnp.arange(final_token_probs_BSN.shape[-1]) > num_unmasked_tokens\n sorted_idxs_BSN = jnp.argsort(final_token_probs_BSN, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask_N))\n new_mask_BSN = mask_update_fn(mask_BSN, sorted_idxs_BSN)\n\n new_carry = (rng, token_idxs_BSN, new_mask_BSN, action_tokens_EL)\n return new_carry, None\n\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array], None]:\n rng, current_token_idxs_BSN = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask_S = jnp.arange(seq_len) >= step_t\n mask_BSN = jnp.broadcast_to(mask_S[None, :, None], (B, seq_len, N)).astype(\n bool\n )\n masked_token_idxs_BSN = current_token_idxs_BSN * ~mask_BSN\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs_BSN,\n mask_BSN,\n action_tokens_EL,\n )\n final_carry_maskgit, _ = jax.lax.scan(\n maskgit_step_fn, init_carry_maskgit, jnp.arange(steps)\n )\n updated_token_idxs_BSN = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs_BSN)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs_BSN = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def sample_causal(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain 
consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # FIXME (f.srambical): reset spatial kv cache after each frame\n assert isinstance(self.dynamics, DynamicsCausal)\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n dynamics_causal: DynamicsCausal = self.dynamics\n\n for block in dynamics_causal.transformer.blocks:\n block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)\n block.temporal_attention.init_cache((B * (N + 1), seq_len, self.dyna_dim), dtype=self.dtype)\n\n @nnx.jit\n def causal_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step_n: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, action_tokens_EL, step_t = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = dynamics_causal.patch_embed(token_idxs_BSN)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = dynamics_causal.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNp1M = jnp.concatenate([act_embed_BS1M, vid_embed_BSNM], axis=2)\n final_logits_BTNp1V = dynamics_causal.transformer(vid_embed_BSNp1M, (step_t, step_n)) / temperature\n final_logits_BV = final_logits_BTNp1V[:, step_t, step_n, :]\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_B = jnp.argmax(final_logits_BV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_B = jax.random.categorical(_rng, final_logits_BV)\n # Update next tokens only\n token_idxs_BSN = token_idxs_BSN.at[:, step_t, step_n].set(sampled_token_idxs_B)\n step_t += 1\n\n new_carry = (rng, token_idxs_BSN, action_tokens_EL, step_t)\n return new_carry, None\n\n # --- Run the autoregressive generation using a for loop ---\n rng = batch[""rng""]\n current_token_idxs_BSN = token_idxs_BSN\n \n for step_t in range(T, seq_len):\n rng, step_rng = jax.random.split(rng)\n\n # --- Reset spatial KV caches before each frame ---\n # for block in dynamics_causal.transformer.blocks:\n # block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)\n #breakpoint()\n\n # --- Initialize and run causal loop ---\n init_carry_causal = (\n step_rng,\n current_token_idxs_BSN,\n action_tokens_EL,\n jnp.array(step_t, dtype=jnp.int32),\n )\n\n # current_token_idxs_BSN.block_until_ready()\n # start = time.time()\n final_carry_causal, _ = jax.lax.scan(\n causal_step_fn, init_carry_causal, 
jnp.arange(N)\n )\n # final_carry_causal[1].block_until_ready()\n # elapsed = time.time() - start\n # print(f""Autoregressive generation time: {elapsed:.4f}s"")\n # breakpoint()\n current_token_idxs_BSN = final_carry_causal[1]\n \n final_token_idxs_BSN = current_token_idxs_BSN\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def vq_encode(self, batch: Dict[str, jax.Array], training: bool) -> jax.Array:\n # --- Preprocess videos ---\n video_BTHWC = batch[""videos""]\n lam_output = self.lam.vq_encode(video_BTHWC, training=training)\n lam_indices_E = lam_output[""indices""]\n return lam_indices_E\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n) -> nnx.Optimizer:\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n tx = optimizer.tx\n model = optimizer.model\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_tokenizer_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = 
_create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_lam_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del model.lam.decoder\n lam_checkpoint_manager.close()\n \n # Reinitialize the optimizer states\n optimizer = nnx.Optimizer(model, tx)\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(\n pytree_template: nnx.GraphState, sharding_spec: jax.sharding.NamedSharding\n) -> jax.Array:\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
|
| 6 |
+
5,7015,"experiments/sample.sh",0,0,"",shellscript,tab
|
| 7 |
+
6,38268,"genie.py",0,0,"",python,tab
|
| 8 |
+
7,105465,"genie.py",16512,1," ",python,selection_command
|
| 9 |
+
8,105575,"genie.py",16511,2,"# ",python,selection_command
|
| 10 |
+
9,105751,"genie.py",16511,2,"# ",python,selection_command
|
| 11 |
+
10,105915,"genie.py",16574,2,"",python,content
|
| 12 |
+
11,105915,"genie.py",16511,2,"",python,content
|
| 13 |
+
12,105926,"genie.py",16511,0,"",python,selection_command
|
| 14 |
+
13,292966,"genie.py",16514,0,"",python,selection_mouse
|
| 15 |
+
14,304442,"genie.py",14498,0,"",python,selection_command
|
| 16 |
+
15,307625,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\nimport math\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_type: str = ""maskgit""\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=True,\n rngs=rngs,\n )\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.Optimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: 
dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n if args.dyna_type == ""maskgit"":\n return model.sample(\n batch,\n args.seq_len,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n )\n elif args.dyna_type == ""causal"":\n return model.sample_causal(\n batch,\n args.seq_len,\n args.temperature,\n args.sample_argmax,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {args.dyna_type}"")\n\n # --- Define autoregressive sampling loop ---\n # FIXME (f.srambical): why is kv caching not working with nnx.jit?\n #@nnx.jit\n def _autoreg_sample(genie, rng, video_batch_BSHWC, action_batch_E):\n input_video_BTHWC = video_batch_BSHWC[:, : args.start_frame]\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=input_video_BTHWC, latent_actions=action_batch_E, rng=_rng)\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n video_batch_BSHWC = next(dataloader)\n gt_video = jnp.asarray(video_batch_BSHWC, dtype=jnp.float32) / 255.0\n video_batch_BSHWC = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n batch = dict(videos=video_batch_BSHWC)\n action_batch_E = genie.vq_encode(batch, training=False)\n\n # --- Sample + evaluate video ---\n # B, S, H, W, _ = video_batch_BSHWC.shape\n # N = math.ceil(H / args.patch_size) * math.ceil(W / args.patch_size)\n # for block in genie.dynamics.transformer.blocks:\n # block.spatial_attention.init_cache((B * S, 1, args.dyna_dim), dtype=args.dtype)\n # block.temporal_attention.init_cache((B * (N + 1), 1, args.dyna_dim), dtype=args.dtype)\n\n recon_video_BSHWC = _autoreg_sample(genie, rng, video_batch_BSHWC, action_batch_E)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n gt = gt_video[:, : recon_video_BSHWC.shape[1]].clip(0, 1).reshape(-1, *gt_video.shape[2:])\n recon = recon_video_BSHWC.clip(0, 1).reshape(-1, *recon_video_BSHWC.shape[2:])\n ssim = jnp.asarray(\n pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :])\n ).mean()\n print(f""SSIM: {ssim}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each 
frame, on each row (i.e., for each video in the batch, on the GT row)\n B, S, _, _, _ = video_batch_BSHWC.shape\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, S-1, 1))\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch_BSm11.shape[0]):\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * video_batch_BSHWC.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n imgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab
|
| 17 |
+
16,307640,"sample.py",34,0,"",python,selection_command
|
| 18 |
+
17,307825,"sample.py",34,0,"t",python,content
|
| 19 |
+
18,307826,"sample.py",35,0,"",python,selection_keyboard
|
| 20 |
+
19,308512,"sample.py",34,1,"",python,content
|
| 21 |
+
20,311448,"sample.py",5133,0,"",python,selection_command
|
| 22 |
+
21,311804,"sample.py",5148,0,"",python,selection_command
|
| 23 |
+
22,311986,"sample.py",5133,0,"",python,selection_command
|
| 24 |
+
23,312412,"sample.py",5148,0,"",python,selection_command
|
| 25 |
+
24,313409,"genie.py",0,0,"",python,tab
|
| 26 |
+
25,319228,"genie.py",14493,0,"",python,selection_command
|
| 27 |
+
26,319668,"genie.py",14493,0,"#",python,content
|
| 28 |
+
27,319668,"genie.py",14494,0,"",python,selection_keyboard
|
| 29 |
+
28,320188,"genie.py",14493,0,"",python,selection_command
|
| 30 |
+
29,377277,"genie.py",14493,1,"",python,content
|
| 31 |
+
30,436763,"TERMINAL",0,0,"",,terminal_focus
|
| 32 |
+
31,438020,"TERMINAL",0,0,"source /home/franz.srambical/jafar/.venv/bin/activate",,terminal_command
|
| 33 |
+
32,438021,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login1:~/jafar",,terminal_output
|
| 34 |
+
33,440321,"TERMINAL",0,0,"bash",,terminal_focus
|
| 35 |
+
34,557954,"genie.py",0,0,"",python,tab
|
| 36 |
+
35,558811,"genie.py",14510,0,"",python,selection_command
|
| 37 |
+
36,558914,"genie.py",14538,0,"",python,selection_command
|
| 38 |
+
37,558943,"genie.py",14626,0,"",python,selection_command
|
| 39 |
+
38,558975,"genie.py",14703,0,"",python,selection_command
|
| 40 |
+
39,558975,"genie.py",14769,0,"",python,selection_command
|
| 41 |
+
40,558975,"genie.py",14813,0,"",python,selection_command
|
| 42 |
+
41,558975,"genie.py",14848,0,"",python,selection_command
|
| 43 |
+
42,558975,"genie.py",14857,0,"",python,selection_command
|
| 44 |
+
43,559208,"genie.py",14904,0,"",python,selection_command
|
| 45 |
+
44,559208,"genie.py",14969,0,"",python,selection_command
|
| 46 |
+
45,559208,"genie.py",14978,0,"",python,selection_command
|
| 47 |
+
46,559208,"genie.py",15019,0,"",python,selection_command
|
| 48 |
+
47,559208,"genie.py",15098,0,"",python,selection_command
|
| 49 |
+
48,559208,"genie.py",15175,0,"",python,selection_command
|
| 50 |
+
49,559279,"genie.py",15254,0,"",python,selection_command
|
| 51 |
+
50,559280,"genie.py",15346,0,"",python,selection_command
|
| 52 |
+
51,559297,"genie.py",15435,0,"",python,selection_command
|
| 53 |
+
52,559333,"genie.py",15547,0,"",python,selection_command
|
| 54 |
+
53,559337,"genie.py",15611,0,"",python,selection_command
|
| 55 |
+
54,559367,"genie.py",15620,0,"",python,selection_command
|
| 56 |
+
55,559549,"genie.py",15676,0,"",python,selection_command
|
| 57 |
+
56,559597,"genie.py",15706,0,"",python,selection_command
|
| 58 |
+
57,559598,"genie.py",15782,0,"",python,selection_command
|
| 59 |
+
58,559598,"genie.py",15800,0,"",python,selection_command
|
| 60 |
+
59,559598,"genie.py",15850,0,"",python,selection_command
|
| 61 |
+
60,559642,"genie.py",15935,0,"",python,selection_command
|
| 62 |
+
61,559643,"genie.py",15973,0,"",python,selection_command
|
| 63 |
+
62,559652,"genie.py",16065,0,"",python,selection_command
|
| 64 |
+
63,559673,"genie.py",16081,0,"",python,selection_command
|
| 65 |
+
64,559685,"genie.py",16090,0,"",python,selection_command
|
| 66 |
+
65,559713,"genie.py",16162,0,"",python,selection_command
|
| 67 |
+
66,559714,"genie.py",16189,0,"",python,selection_command
|
| 68 |
+
67,559735,"genie.py",16198,0,"",python,selection_command
|
| 69 |
+
68,559839,"genie.py",16267,0,"",python,selection_command
|
| 70 |
+
69,559878,"genie.py",16294,0,"",python,selection_command
|
| 71 |
+
70,559917,"genie.py",16341,0,"",python,selection_command
|
| 72 |
+
71,559993,"genie.py",16351,0,"",python,selection_command
|
| 73 |
+
72,560246,"genie.py",16392,0,"",python,selection_command
|
| 74 |
+
73,560247,"genie.py",16434,0,"",python,selection_command
|
| 75 |
+
74,560247,"genie.py",16443,0,"",python,selection_command
|
| 76 |
+
75,560247,"genie.py",16507,0,"",python,selection_command
|
| 77 |
+
76,560271,"genie.py",16568,0,"",python,selection_command
|
| 78 |
+
77,560279,"genie.py",16676,0,"",python,selection_command
|
| 79 |
+
78,560881,"genie.py",16694,0,"",python,selection_command
|
| 80 |
+
79,560967,"genie.py",16703,0,"",python,selection_command
|
| 81 |
+
80,561002,"genie.py",16756,0,"",python,selection_command
|
| 82 |
+
81,561017,"genie.py",16790,0,"",python,selection_command
|
| 83 |
+
82,561038,"genie.py",16816,0,"",python,selection_command
|
| 84 |
+
83,561096,"genie.py",16856,0,"",python,selection_command
|
| 85 |
+
84,561475,"genie.py",16890,0,"",python,selection_command
|
| 86 |
+
85,561510,"genie.py",16942,0,"",python,selection_command
|
| 87 |
+
86,561536,"genie.py",16890,0,"",python,selection_command
|
| 88 |
+
87,562376,"genie.py",16856,0,"",python,selection_command
|
| 89 |
+
88,562840,"genie.py",16816,0,"",python,selection_command
|
| 90 |
+
89,562857,"genie.py",16856,0,"",python,selection_command
|
| 91 |
+
90,563111,"genie.py",16890,0,"",python,selection_command
|
| 92 |
+
91,563128,"genie.py",16942,0,"",python,selection_command
|
| 93 |
+
92,563206,"genie.py",16948,0,"",python,selection_command
|
| 94 |
+
93,563208,"genie.py",16957,0,"",python,selection_command
|
| 95 |
+
94,563239,"genie.py",17014,0,"",python,selection_command
|
| 96 |
+
95,563254,"genie.py",17048,0,"",python,selection_command
|
| 97 |
+
96,563255,"genie.py",17098,0,"",python,selection_command
|
| 98 |
+
97,563288,"genie.py",17163,0,"",python,selection_command
|
| 99 |
+
98,563409,"genie.py",17177,0,"",python,selection_command
|
| 100 |
+
99,563440,"genie.py",17163,0,"",python,selection_command
|
| 101 |
+
100,563576,"genie.py",17098,0,"",python,selection_command
|
| 102 |
+
101,563588,"genie.py",17048,0,"",python,selection_command
|
| 103 |
+
102,563920,"genie.py",17014,0,"",python,selection_command
|
| 104 |
+
103,563988,"genie.py",16957,0,"",python,selection_command
|
| 105 |
+
104,564001,"genie.py",16948,0,"",python,selection_command
|
| 106 |
+
105,564006,"genie.py",16942,0,"",python,selection_command
|
| 107 |
+
106,564028,"genie.py",16890,0,"",python,selection_command
|
| 108 |
+
107,564032,"genie.py",16856,0,"",python,selection_command
|
| 109 |
+
108,564319,"genie.py",16816,0,"",python,selection_command
|
| 110 |
+
109,564319,"genie.py",16790,0,"",python,selection_command
|
| 111 |
+
110,564333,"genie.py",16756,0,"",python,selection_command
|
| 112 |
+
111,564357,"genie.py",16703,0,"",python,selection_command
|
| 113 |
+
112,564357,"genie.py",16694,0,"",python,selection_command
|
| 114 |
+
113,564518,"genie.py",16676,0,"",python,selection_command
|
| 115 |
+
114,564650,"genie.py",16568,0,"",python,selection_command
|
| 116 |
+
115,565429,"genie.py",16507,0,"",python,selection_command
|
| 117 |
+
116,565576,"genie.py",16499,60," for block in dynamics_causal.transformer.blocks:",python,selection_command
|
| 118 |
+
117,565668,"genie.py",16499,168," for block in dynamics_causal.transformer.blocks:\n block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)",python,selection_command
|
| 119 |
+
118,620912,"genie.py",16568,0,"",python,selection_command
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-091406d7-396d-478a-8984-cf3f17325f1e1761067614689-2025_10_21-19.27.02.112/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0a6116e9-6ace-40c6-ab67-84982fd29f7e1760875742620-2025_10_19-14.09.09.963/source.csv
ADDED
|
@@ -0,0 +1,83 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"slurm/jobs/alfred/helmholtz_cluster/jafar_og_reproduction/og_coinrun_dynamics_reproduction.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --gres=gpu:1\n#SBATCH --time=3-00:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --partition=gpu_p\n#SBATCH --reservation=haicu_stefan\n#SBATCH --qos=gpu_long\n#SBATCH --output=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/logs/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=train_dynamics_coinrun_og_reproduction\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun_og dynanmics 10m_dataset helmholtz_reproduction dyn_repro""\n\nnpy_records_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/data/coinrun_episodes/coinrun_episodes""\n\ntokenizer_ckpt_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/tokenizer/train_tokenizer_coinrun_og_reproduction_28246778/tokenizer_1756303195_110000""\nlam_ckpt_dir=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/lam/train_lam_coinrun_og_reproduction_28246647/lam_1756303037_200000""\nCHECKPOINT_DIR=""/lustre/groups/haicu/workspace/alfred.nguyen/jafar_worksapce/checkpoints/coinrun/dynamics/${job_name}/${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python jasmine/train_dynamics.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --tokenizer_checkpoint=""${tokenizer_ckpt_dir}"" \\n --lam_checkpoint=""${lam_ckpt_dir}"" \\n --log_image_interval=1000 \\n --log \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags ${tags} \\n --entity instant-uv \\n --project jafar \\n --data_dir $npy_records_dir\n",shellscript,tab
|
| 3 |
+
2,290,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:09:09 PM [info] Activating crowd-code\n2:09:09 PM [info] Recording started\n2:09:09 PM [info] Initializing git provider using file system watchers...\n2:09:10 PM [info] Git repository found\n2:09:10 PM [info] Git provider initialized successfully\n",Log,tab
|
| 4 |
+
3,1126,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"2:09:10 PM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,4200,"TERMINAL",0,0,"",,terminal_command
|
| 6 |
+
5,6695,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics_sample/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics_sample/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit_mila_submission_case_study_vanilla\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_10M_npy_arr_rec/array_record/test""\nCHECKPOINT_PATH=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/dynamics_case_study_dataset_10M_30031""\n\ncurrent_branch=$(git rev-parse --abbrev-ref HEAD)\nif [ ""$current_branch"" != ""main"" ]; then\n echo ""This script must be run from the main branch. Current branch is $current_branch. Exiting.""\n exit 1\nfi\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --seed=1 \\n --maskgit_steps=1 \\n --tokenizer_ffn_dim=512 \\n --tokenizer_num_blocks=8 \\n --dyna_ffn_dim=512 \\n --dyna_num_blocks=12 \\n --output_dir=gifs/dynamics_case_study_dataset_10M_vanilla \\n --checkpoint $CHECKPOINT_PATH \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=32 \\n --patch_size=4 \\n --start_frame=4 \\n --image_height=64 \\n --image_width=64 \\n --dyna_type=maskgit\n",shellscript,tab
|
| 7 |
+
6,7248,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 8 |
+
7,7249,"extension-output-pdoom-org.crowd-code-#1-crowd-code",278,0,"",Log,selection_mouse
|
| 9 |
+
8,8102,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"",shellscript,tab
|
| 10 |
+
9,9451,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 11 |
+
10,10977,"TERMINAL",0,0,"",,terminal_command
|
| 12 |
+
11,75062,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"",shellscript,tab
|
| 13 |
+
12,75064,"TERMINAL",0,0,"",,terminal_focus
|
| 14 |
+
13,75507,"TERMINAL",0,0,"source /home/franz.srambical/jafar/.venv/bin/activate",,terminal_command
|
| 15 |
+
14,75518,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login2:~/jafar",,terminal_output
|
| 16 |
+
15,118994,"TERMINAL",0,0,"",,terminal_command
|
| 17 |
+
16,145032,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics_sample/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics_sample/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit_mila_submission\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_500m_seed_w_increment/val""\nCHECKPOINT_PATH=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/dynamics_coinrun_mila_submission/29973""\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint $CHECKPOINT_PATH \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=4 \\n --patch_size=16 \\n --start_frame=4 \\n --image_height=64 \\n --image_width=64 \\n --dyna_type=maskgit\n",shellscript,tab
|
| 18 |
+
17,145033,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",39,0,"",shellscript,selection_command
|
| 19 |
+
18,149733,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics_sample/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics_sample/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit_mila_submission_case_study_action_prepend\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_10M_npy_arr_rec/array_record/test""\nCHECKPOINT_PATH=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/dynamics_case_study_dataset_10M_action_prepend_30032""\n\ncurrent_branch=$(git rev-parse --abbrev-ref HEAD)\nif [ ""$current_branch"" != ""prepend-action-maskgit"" ]; then\n echo ""This script must be run from the prepend-action-maskgit branch. Current branch is $current_branch. Exiting.""\n exit 1\nfi\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --seed=1 \\n --maskgit_steps=1 \\n --tokenizer_ffn_dim=512 \\n --tokenizer_num_blocks=8 \\n --dyna_ffn_dim=512 \\n --dyna_num_blocks=12 \\n --output_dir=gifs/dynamics_case_study_dataset_10M_action_prepend \\n --checkpoint $CHECKPOINT_PATH \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=32 \\n --patch_size=4 \\n --start_frame=4 \\n --image_height=64 \\n --image_width=64 \\n --dyna_type=maskgit\n",shellscript,tab
|
| 20 |
+
19,151494,"slurm/dev/franz/berlin/atari/sample/spawn_sampler.sh",0,0,"#!/usr/bin/env bash\n\nset -euo pipefail\n\nREPO_ROOT=""/home/franz.srambical/jafar""\n\nDATA_ROOT=""${DATA_ROOT:-/fast/project/HFMI_SynergyUnit/jafar_ws/data/atari}""\n\nSAMPLE_SCRIPT=""${SAMPLE_SCRIPT:-$REPO_ROOT/slurm/dev/franz/berlin/atari/sample/sample_atari.sh}""\n\nif [ ""$#"" -gt 0 ]; then\n ENV_LIST=(""$@"")\nelse\n if [ ! -d ""$DATA_ROOT"" ]; then\n echo ""DATA_ROOT does not exist: $DATA_ROOT"" >&2\n exit 1\n fi\n mapfile -t ENV_LIST < <(find ""$DATA_ROOT"" -mindepth 1 -maxdepth 1 -type d -printf '%f\n' | sort)\nfi\n\necho ""Submitting Sample jobs for environments: ${ENV_LIST[*]}""\n\nfor env in ""${ENV_LIST[@]}""; do\n env_dir=""$DATA_ROOT/$env""\n if [ ! -d ""$env_dir/val"" ]; then\n echo ""Skipping $env (missing val under $env_dir)""\n continue\n fi\n\n job_name=""sample_atari_${env}_maskgit""\n\n job_id=$(sbatch --parsable \\n --job-name=""$job_name"" \\n --chdir=""$REPO_ROOT"" \\n --export=ALL,ENV_NAME=""$env"" \\n ""$SAMPLE_SCRIPT"")\n\n echo ""Submitted $job_name (ENV_NAME=$env) as job $job_id""\ndone\n\n\n",shellscript,tab
|
| 21 |
+
20,152309,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",0,0,"",shellscript,tab
|
| 22 |
+
21,153897,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"",shellscript,tab
|
| 23 |
+
22,156362,"slurm/dev/franz/berlin/atari/sample/spawn_sampler.sh",0,0,"",shellscript,tab
|
| 24 |
+
23,157597,"slurm/dev/franz/berlin/atari/sample/sample_atari.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/atari/sample/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/atari/sample/%x_%j.log\n#SBATCH --job-name=atari_sample_maskgit\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/${ENV_NAME}/val/""\n\n# TODO: demon_attack is still training\n# TODO: add pong and chopper_command after fixing their tokenizer and finishing dynamics model training\ndeclare -A TOKENIZER_CKPT_MAP=(\n [alien]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/alien/dynamics/dynamics_atari_alien_dev_fleuret_hparams_patch_size_4/30523""\n [amidar]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/amidar/dynamics/dynamics_atari_amidar_dev_fleuret_hparams_patch_size_4/30524""\n [assault]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/assault/dynamics/dynamics_atari_assault_dev_fleuret_hparams_patch_size_4/30525""\n [asterix]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/asterix/dynamics/dynamics_atari_asterix_dev_fleuret_hparams_patch_size_4/30526""\n [bank_heist]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/bank_heist/dynamics/dynamics_atari_bank_heist_dev_fleuret_hparams_patch_size_4/30527""\n [battle_zone]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/battle_zone/dynamics/dynamics_atari_battle_zone_dev_fleuret_hparams_patch_size_4/30528""\n [boxing]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/boxing/dynamics/dynamics_atari_boxing_dev_fleuret_hparams_patch_size_4/30529""\n [breakout]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/breakout/dynamics/dynamics_atari_breakout_dev_fleuret_hparams_patch_size_4/30530""\n [crazy_climber]=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/atari/crazy_climber/dynamics/dynamics_atari_crazy_climber_dev_fleuret_hparams_patch_size_4/30531""\n [demon_attack]=""""\n [pong]=""""\n [chopper_command]=""""\n)\n\n# Allow explicit override via env var; otherwise use map\nCHECKPOINT_PATH=""${SAMPLE_CHECKPOINT:-${CHECKPOINT_PATH:-${TOKENIZER_CKPT_MAP[$ENV_NAME]}}}""\n\nif [ -z ""${ENV_NAME:-}"" ]; then\n echo ""ENV_NAME is not set; cannot locate data/checkpoints"" >&2\n exit 1\nfi\n\nif [ -z ""${CHECKPOINT_PATH}"" ]; then\n echo ""No checkpoint configured for ENV_NAME=$ENV_NAME. Set SAMPLE_CHECKPOINT or populate TOKENIZER_CKPT_MAP."" >&2\n exit 1\nfi\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint $CHECKPOINT_PATH \\n --num_patch_latents=512 \\n --latent_patch_dim=512 \\n --tokenizer_dim=256 \\n --tokenizer_ffn_dim=256 \\n --tokenizer_num_heads=4 \\n --patch_size=4 \\n --latent_action_dim=512 \\n --image_height=84 \\n --image_width=84 \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=4 \\n --start_frame=4 \\n --dyna_type=maskgit\n",shellscript,tab
|
| 25 |
+
24,242062,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"",shellscript,tab
|
| 26 |
+
25,243388,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1048,0,"",shellscript,selection_mouse
|
| 27 |
+
26,244281,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1049,0,"",shellscript,selection_command
|
| 28 |
+
27,244583,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1049,1,"",shellscript,content
|
| 29 |
+
28,245185,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1049,0,"2",shellscript,content
|
| 30 |
+
29,245185,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1050,0,"",shellscript,selection_keyboard
|
| 31 |
+
30,245297,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1050,0,"5",shellscript,content
|
| 32 |
+
31,245297,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1051,0,"",shellscript,selection_keyboard
|
| 33 |
+
32,245605,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1050,0,"",shellscript,selection_command
|
| 34 |
+
33,246724,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1029,25,"",shellscript,content
|
| 35 |
+
34,246734,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1033,0,"",shellscript,selection_command
|
| 36 |
+
35,249773,"jasmine/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n print_action_indices: bool = True\n output_dir: str = ""gifs/""\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 1\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n use_gt_actions: bool = False\n # Dynamics checkpoint\n dyna_type: str = ""maskgit""\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n use_gt_actions=args.use_gt_actions,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n # FIXME (f.srambical): implement spatiotemporal KV caching and set decode=True\n decode=False,\n rngs=rngs,\n )\n\n # Need to delete lam decoder for checkpoint loading\n if not args.use_gt_actions:\n assert genie.lam is not None\n del genie.lam.decoder\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n 
options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.ModelAndOptimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n assert args.dyna_type in [\n ""maskgit"",\n ""causal"",\n ], f""Invalid dynamics type: {args.dyna_type}""\n frames, _ = model.sample(\n batch,\n args.seq_len,\n args.temperature,\n args.sample_argmax,\n args.maskgit_steps,\n )\n return frames\n\n # --- Define autoregressive sampling loop ---\n def _autoreg_sample(genie, rng, batch):\n batch[""videos""] = batch[""videos""][:, : args.start_frame]\n batch[""rng""] = rng\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n batch = next(dataloader)\n gt_video = jnp.asarray(batch[""videos""], dtype=jnp.float32) / 255.0\n batch[""videos""] = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n action_batch_E = None\n if not args.use_gt_actions:\n action_batch_E = genie.vq_encode(batch, training=False)\n batch[""latent_actions""] = action_batch_E\n\n # --- Sample + evaluate video ---\n recon_video_BSHWC = _autoreg_sample(genie, rng, batch)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n\n gt = gt_video.clip(0, 1)[:, args.start_frame :]\n recon = recon_video_BSHWC.clip(0, 1)[:, args.start_frame :]\n\n ssim_vmap = jax.vmap(pix.ssim, in_axes=(0, 0))\n psnr_vmap = jax.vmap(pix.psnr, in_axes=(0, 0))\n ssim = ssim_vmap(gt, recon)\n psnr = psnr_vmap(gt, recon)\n per_frame_ssim = ssim.mean(0)\n per_frame_psnr = psnr.mean(0)\n avg_ssim = ssim.mean()\n avg_psnr = psnr.mean()\n\n print(""Per-frame SSIM:\n"", per_frame_ssim)\n print(""Per-frame PSNR:\n"", per_frame_psnr)\n\n print(f""SSIM: {avg_ssim}"")\n print(f""PSNR: {avg_psnr}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n B = 
batch[""videos""].shape[0]\n if action_batch_E is not None:\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, args.seq_len - 1, 1))\n else:\n action_batch_BSm11 = jnp.reshape(\n batch[""actions""][:, :-1], (B, args.seq_len - 1, 1)\n )\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(B):\n if args.print_action_indices:\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * batch[""videos""].shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n\n os.makedirs(args.output_dir, exist_ok=True)\n imgs[0].save(\n os.path.join(args.output_dir, f""generation_{time.time()}.gif""),\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab
|
| 37 |
+
36,251863,"jasmine/sample.py",693,0,"",python,selection_command
|
| 38 |
+
37,252570,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"",shellscript,tab
|
| 39 |
+
38,254267,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1063,0,"",shellscript,selection_command
|
| 40 |
+
39,254278,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1064,0,"",shellscript,selection_command
|
| 41 |
+
40,254408,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1064,0,"w",shellscript,content
|
| 42 |
+
41,254409,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1065,0,"",shellscript,selection_keyboard
|
| 43 |
+
42,254532,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1065,0,"\n ",shellscript,content
|
| 44 |
+
43,255507,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1069,0,"",shellscript,selection_command
|
| 45 |
+
44,255797,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1064,6,"",shellscript,content
|
| 46 |
+
45,257670,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",0,0,"",shellscript,tab
|
| 47 |
+
46,259063,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",1087,24,"",shellscript,content
|
| 48 |
+
47,259080,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",1091,0,"",shellscript,selection_command
|
| 49 |
+
48,276832,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",1172,0,"",shellscript,selection_mouse
|
| 50 |
+
49,276833,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",1171,0,"",shellscript,selection_command
|
| 51 |
+
50,283709,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_action_prepend.sh",1172,0,"",shellscript,selection_command
|
| 52 |
+
51,284322,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",0,0,"",shellscript,tab
|
| 53 |
+
52,300326,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1095,0,"",shellscript,selection_command
|
| 54 |
+
53,300601,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1064,0,"",shellscript,selection_command
|
| 55 |
+
54,300875,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1034,0,"",shellscript,selection_command
|
| 56 |
+
55,301158,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1019,0,"",shellscript,selection_command
|
| 57 |
+
56,301434,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1034,0,"",shellscript,selection_command
|
| 58 |
+
57,301671,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1064,0,"",shellscript,selection_command
|
| 59 |
+
58,301707,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1095,0,"",shellscript,selection_command
|
| 60 |
+
59,301842,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1120,0,"",shellscript,selection_command
|
| 61 |
+
60,302038,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1147,0,"",shellscript,selection_command
|
| 62 |
+
61,302212,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1211,0,"",shellscript,selection_command
|
| 63 |
+
62,302355,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1247,0,"",shellscript,selection_command
|
| 64 |
+
63,302604,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1283,0,"",shellscript,selection_command
|
| 65 |
+
64,302630,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1302,0,"",shellscript,selection_command
|
| 66 |
+
65,302664,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1324,0,"",shellscript,selection_command
|
| 67 |
+
66,302813,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1345,0,"",shellscript,selection_command
|
| 68 |
+
67,303027,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1367,0,"",shellscript,selection_command
|
| 69 |
+
68,303438,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1345,0,"",shellscript,selection_command
|
| 70 |
+
69,303682,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1324,0,"",shellscript,selection_command
|
| 71 |
+
70,303706,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1302,0,"",shellscript,selection_command
|
| 72 |
+
71,303733,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1283,0,"",shellscript,selection_command
|
| 73 |
+
72,303772,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1247,0,"",shellscript,selection_command
|
| 74 |
+
73,303805,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1211,0,"",shellscript,selection_command
|
| 75 |
+
74,303850,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1147,0,"",shellscript,selection_command
|
| 76 |
+
75,303875,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1120,0,"",shellscript,selection_command
|
| 77 |
+
76,303905,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1095,0,"",shellscript,selection_command
|
| 78 |
+
77,303939,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1064,0,"",shellscript,selection_command
|
| 79 |
+
78,304068,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1034,0,"",shellscript,selection_command
|
| 80 |
+
79,304239,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission_case_study_vanilla.sh",1019,0,"",shellscript,selection_command
|
| 81 |
+
80,9643007,"TERMINAL",0,0,"uv pip show grain",,terminal_command
|
| 82 |
+
81,9643055,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 83 |
+
82,9643452,"TERMINAL",0,0,"Name: grain\r\nVersion: 0.2.12\r\nLocation: /fast/home/franz.srambical/jafar/.venv/lib/python3.13/site-packages\r\nRequires: absl-py, array-record, cloudpickle, dm-tree, etils, more-itertools, numpy, protobuf\r\nRequired-by: jasmine\r\n]0;franz.srambical@hai-login2:~/jafar",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0f6ed796-3fd2-4eb6-ad79-777c3b4353711750804480941-2025_06_25-00.35.32.827/source.csv
ADDED
|
@@ -0,0 +1,215 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,4,"train_lam.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@jax.jit\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = jnp.where(do_reset, 0, 
action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, action_last_active\n 
)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""lam_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
| 3 |
+
2,3270,"TERMINAL",0,0,"/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
|
| 4 |
+
3,3288,"TERMINAL",0,0,"]633;E;/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;7bb61bf0-4c9f-4cd1-bf9e-c4e0d865bd72]633;C",,terminal_output
|
| 5 |
+
4,3315,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit02:/ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
|
| 6 |
+
5,45867,"train_lam.py",0,0,"",python,tab
|
| 7 |
+
6,45956,"train_lam.py",6991,0,"",python,selection_command
|
| 8 |
+
7,48071,"train_dynamics.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""dropout_rng""]}\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = 
args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n step = 0\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _mask_rng = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(\n videos=videos,\n dropout_rng=_rng,\n mask_rng=_mask_rng,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n 
recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
| 9 |
+
8,48073,"train_dynamics.py",6425,0,"",python,selection_command
|
| 10 |
+
9,50180,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\ndef create_training_functions(args):\n """"""Create training functions with args captured in closure.""""""\n \n def tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n @jax.jit\n def train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n \n return tokenizer_loss_fn, train_step\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n \n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n tokenizer_loss_fn, train_step = create_training_functions(args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = sorted(\n [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n )\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n # --- Start timing from dataloading through training ---\n start_time = time.time()\n \n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n \n # --- End timing after train step ---\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, total time (dataloading + training): {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, 
""step"": step, ""step_time_ms"": elapsed_time, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n start_time = time.time()\n",python,tab
|
| 11 |
+
10,50183,"train_tokenizer.py",1438,0,"",python,selection_command
|
| 12 |
+
11,66302,"train_tokenizer.py",6963,0,"",python,selection_mouse
|
| 13 |
+
12,66638,"train_tokenizer.py",6945,64," # --- Start timing from dataloading through training ---",python,selection_command
|
| 14 |
+
13,66997,"train_tokenizer.py",6945,97," # --- Start timing from dataloading through training ---\n start_time = time.time()",python,selection_command
|
| 15 |
+
14,67184,"train_tokenizer.py",6945,106," # --- Start timing from dataloading through training ---\n start_time = time.time()\n ",python,selection_command
|
| 16 |
+
15,67335,"train_tokenizer.py",6945,107,"",python,content
|
| 17 |
+
16,67363,"train_tokenizer.py",6953,0,"",python,selection_command
|
| 18 |
+
17,68265,"train_tokenizer.py",6920,0,"",python,selection_command
|
| 19 |
+
18,69139,"Untitled-1",0,0,"",plaintext,tab
|
| 20 |
+
19,70100,"train_tokenizer.py",0,0,"",python,tab
|
| 21 |
+
20,73237,"train_tokenizer.py",0,0,"",python,tab
|
| 22 |
+
21,73944,"train_tokenizer.py",6858,0,"",python,selection_command
|
| 23 |
+
22,74236,"train_tokenizer.py",6877,0,"",python,selection_command
|
| 24 |
+
23,74412,"train_tokenizer.py",6929,0,"",python,selection_command
|
| 25 |
+
24,74667,"train_tokenizer.py",6945,0," # --- Start timing from dataloading through training ---\n start_time = time.time()\n \n",python,content
|
| 26 |
+
25,75782,"train_tokenizer.py",8633,0,"",python,selection_command
|
| 27 |
+
26,77250,"train_tokenizer.py",9326,0,"",python,selection_command
|
| 28 |
+
27,78385,"train_tokenizer.py",9289,0,"",python,selection_command
|
| 29 |
+
28,78632,"train_tokenizer.py",9267,0,"",python,selection_command
|
| 30 |
+
29,78660,"train_tokenizer.py",9228,0,"",python,selection_command
|
| 31 |
+
30,78684,"train_tokenizer.py",9210,0,"",python,selection_command
|
| 32 |
+
31,78716,"train_tokenizer.py",9169,0,"",python,selection_command
|
| 33 |
+
32,78754,"train_tokenizer.py",9143,0,"",python,selection_command
|
| 34 |
+
33,78794,"train_tokenizer.py",9055,0,"",python,selection_command
|
| 35 |
+
34,78823,"train_tokenizer.py",9014,0,"",python,selection_command
|
| 36 |
+
35,78848,"train_tokenizer.py",8946,0,"",python,selection_command
|
| 37 |
+
36,78896,"train_tokenizer.py",8871,0,"",python,selection_command
|
| 38 |
+
37,78920,"train_tokenizer.py",8825,0,"",python,selection_command
|
| 39 |
+
38,78984,"train_tokenizer.py",8768,0,"",python,selection_command
|
| 40 |
+
39,79008,"train_tokenizer.py",8726,0,"",python,selection_command
|
| 41 |
+
40,79028,"train_tokenizer.py",8704,0,"",python,selection_command
|
| 42 |
+
41,79056,"train_tokenizer.py",8677,0,"",python,selection_command
|
| 43 |
+
42,79082,"train_tokenizer.py",8605,0,"",python,selection_command
|
| 44 |
+
43,79128,"train_tokenizer.py",8554,0,"",python,selection_command
|
| 45 |
+
44,79152,"train_tokenizer.py",8485,0,"",python,selection_command
|
| 46 |
+
45,79187,"train_tokenizer.py",8419,0,"",python,selection_command
|
| 47 |
+
46,79217,"train_tokenizer.py",8380,0,"",python,selection_command
|
| 48 |
+
47,79263,"train_tokenizer.py",8358,0,"",python,selection_command
|
| 49 |
+
48,79291,"train_tokenizer.py",8289,0,"",python,selection_command
|
| 50 |
+
49,79323,"train_tokenizer.py",8234,0,"",python,selection_command
|
| 51 |
+
50,79368,"train_tokenizer.py",8152,0,"",python,selection_command
|
| 52 |
+
51,79391,"train_tokenizer.py",8100,0,"",python,selection_command
|
| 53 |
+
52,79416,"train_tokenizer.py",8051,0,"",python,selection_command
|
| 54 |
+
53,79460,"train_tokenizer.py",7995,0,"",python,selection_command
|
| 55 |
+
54,79491,"train_tokenizer.py",7894,0,"",python,selection_command
|
| 56 |
+
55,79529,"train_tokenizer.py",7844,0,"",python,selection_command
|
| 57 |
+
56,79561,"train_tokenizer.py",7790,0,"",python,selection_command
|
| 58 |
+
57,79588,"train_tokenizer.py",7760,0,"",python,selection_command
|
| 59 |
+
58,79633,"train_tokenizer.py",7759,0,"",python,selection_command
|
| 60 |
+
59,79656,"train_tokenizer.py",7737,0,"",python,selection_command
|
| 61 |
+
60,79692,"train_tokenizer.py",7634,0,"",python,selection_command
|
| 62 |
+
61,79724,"train_tokenizer.py",7573,0,"",python,selection_command
|
| 63 |
+
62,79763,"train_tokenizer.py",7523,0,"",python,selection_command
|
| 64 |
+
63,79791,"train_tokenizer.py",7510,0,"",python,selection_command
|
| 65 |
+
64,79832,"train_tokenizer.py",7430,0,"",python,selection_command
|
| 66 |
+
65,79864,"train_tokenizer.py",7379,0,"",python,selection_command
|
| 67 |
+
66,79892,"train_tokenizer.py",7378,0,"",python,selection_command
|
| 68 |
+
67,79931,"train_tokenizer.py",7293,0,"",python,selection_command
|
| 69 |
+
68,79967,"train_tokenizer.py",7279,0,"",python,selection_command
|
| 70 |
+
69,79996,"train_tokenizer.py",7211,0,"",python,selection_command
|
| 71 |
+
70,80032,"train_tokenizer.py",7166,0,"",python,selection_command
|
| 72 |
+
71,80068,"train_tokenizer.py",7165,0,"",python,selection_command
|
| 73 |
+
72,80105,"train_tokenizer.py",7119,0,"",python,selection_command
|
| 74 |
+
73,80143,"train_tokenizer.py",7086,0,"",python,selection_command
|
| 75 |
+
74,80169,"train_tokenizer.py",7052,0,"",python,selection_command
|
| 76 |
+
75,80207,"train_tokenizer.py",7043,0,"",python,selection_command
|
| 77 |
+
76,80236,"train_tokenizer.py",7010,0,"",python,selection_command
|
| 78 |
+
77,82753,"train_tokenizer.py",7056,29,"",python,content
|
| 79 |
+
78,82753,"train_tokenizer.py",7051,1,"",python,content
|
| 80 |
+
79,82753,"train_tokenizer.py",7018,0," ",python,content
|
| 81 |
+
80,82754,"train_tokenizer.py",6953,0,"for videos in dataloader:\n ",python,content
|
| 82 |
+
81,84196,"train_tokenizer.py",6979,0,"",python,selection_command
|
| 83 |
+
82,84277,"train_tokenizer.py",6945,0,"",python,selection_command
|
| 84 |
+
83,84383,"train_tokenizer.py",6912,0,"",python,selection_command
|
| 85 |
+
84,84557,"train_tokenizer.py",6860,0,"",python,selection_command
|
| 86 |
+
85,84850,"train_tokenizer.py",6912,0,"",python,selection_command
|
| 87 |
+
86,85901,"train_tokenizer.py",6945,0,"",python,selection_command
|
| 88 |
+
87,86042,"train_tokenizer.py",6979,0,"",python,selection_command
|
| 89 |
+
88,86902,"train_tokenizer.py",7048,0,"",python,selection_command
|
| 90 |
+
89,87549,"train_tokenizer.py",7085,0,"",python,selection_command
|
| 91 |
+
90,87964,"train_tokenizer.py",7048,0,"",python,selection_command
|
| 92 |
+
91,88416,"train_tokenizer.py",7085,0,"",python,selection_command
|
| 93 |
+
92,88681,"train_tokenizer.py",7085,12," ",python,selection_command
|
| 94 |
+
93,88772,"train_tokenizer.py",7048,49," start_time = time.time()\n ",python,selection_command
|
| 95 |
+
94,88911,"train_tokenizer.py",6979,118," # --- Start timing from dataloading through training ---\n start_time = time.time()\n ",python,selection_command
|
| 96 |
+
95,89022,"train_tokenizer.py",6979,119,"",python,content
|
| 97 |
+
96,89036,"train_tokenizer.py",6991,0,"",python,selection_command
|
| 98 |
+
97,89860,"train_tokenizer.py",6957,0,"",python,selection_command
|
| 99 |
+
98,90316,"train_tokenizer.py",6924,0,"",python,selection_command
|
| 100 |
+
99,90320,"train_tokenizer.py",6957,0,"",python,selection_command
|
| 101 |
+
100,91443,"train_tokenizer.py",6991,0,"",python,selection_command
|
| 102 |
+
101,91638,"train_tokenizer.py",7024,0,"",python,selection_command
|
| 103 |
+
102,92015,"train_tokenizer.py",6991,0,"",python,selection_command
|
| 104 |
+
103,93099,"train_tokenizer.py",7024,0,"",python,selection_command
|
| 105 |
+
104,93348,"train_tokenizer.py",7058,0,"",python,selection_command
|
| 106 |
+
105,93365,"train_tokenizer.py",7071,0,"",python,selection_command
|
| 107 |
+
106,93407,"train_tokenizer.py",7116,0,"",python,selection_command
|
| 108 |
+
107,93439,"train_tokenizer.py",7184,0,"",python,selection_command
|
| 109 |
+
108,93465,"train_tokenizer.py",7198,0,"",python,selection_command
|
| 110 |
+
109,93504,"train_tokenizer.py",7271,0,"",python,selection_command
|
| 111 |
+
110,93536,"train_tokenizer.py",7284,0,"",python,selection_command
|
| 112 |
+
111,93568,"train_tokenizer.py",7335,0,"",python,selection_command
|
| 113 |
+
112,93608,"train_tokenizer.py",7414,0,"",python,selection_command
|
| 114 |
+
113,93644,"train_tokenizer.py",7428,0,"",python,selection_command
|
| 115 |
+
114,93956,"train_tokenizer.py",7414,0,"",python,selection_command
|
| 116 |
+
115,94133,"train_tokenizer.py",7403,12," ",python,selection_command
|
| 117 |
+
116,94339,"train_tokenizer.py",7403,62," \n # --- End timing after train step ---",python,selection_command
|
| 118 |
+
117,94588,"train_tokenizer.py",7403,123," \n # --- End timing after train step ---\n elapsed_time = (time.time() - start_time) * 1000",python,selection_command
|
| 119 |
+
118,94948,"train_tokenizer.py",7403,226," \n # --- End timing after train step ---\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, total time (dataloading + training): {elapsed_time}ms"")",python,selection_command
|
| 120 |
+
119,96750,"train_tokenizer.py",7403,227,"",python,content
|
| 121 |
+
120,96764,"train_tokenizer.py",7415,0,"",python,selection_command
|
| 122 |
+
121,97612,"train_tokenizer.py",7335,0,"",python,selection_command
|
| 123 |
+
122,98524,"train_tokenizer.py",7425,0,"",python,selection_command
|
| 124 |
+
123,98916,"train_tokenizer.py",7438,0,"",python,selection_command
|
| 125 |
+
124,99189,"train_tokenizer.py",7468,0,"",python,selection_command
|
| 126 |
+
125,99193,"train_tokenizer.py",7522,0,"",python,selection_command
|
| 127 |
+
126,99320,"train_tokenizer.py",7572,0,"",python,selection_command
|
| 128 |
+
127,100625,"train_tokenizer.py",7619,30,"",python,content
|
| 129 |
+
128,101864,"train_tokenizer.py",7522,0,"",python,selection_command
|
| 130 |
+
129,102947,"train_tokenizer.py",7572,0,"",python,selection_command
|
| 131 |
+
130,103688,"train_tokenizer.py",8962,0,"",python,selection_command
|
| 132 |
+
131,104057,"train_tokenizer.py",8925,0,"",python,selection_command
|
| 133 |
+
132,104325,"train_tokenizer.py",8925,37,"",python,content
|
| 134 |
+
133,105007,"train_tokenizer.py",8903,0,"",python,selection_command
|
| 135 |
+
134,106721,"train_tokenizer.py",7438,0,"",python,selection_command
|
| 136 |
+
135,107443,"train_tokenizer.py",7425,0,"",python,selection_command
|
| 137 |
+
136,107693,"train_tokenizer.py",7415,0,"",python,selection_command
|
| 138 |
+
137,107697,"train_tokenizer.py",7335,0,"",python,selection_command
|
| 139 |
+
138,107728,"train_tokenizer.py",7284,0,"",python,selection_command
|
| 140 |
+
139,107768,"train_tokenizer.py",7271,0,"",python,selection_command
|
| 141 |
+
140,107833,"train_tokenizer.py",7198,0,"",python,selection_command
|
| 142 |
+
141,107836,"train_tokenizer.py",7184,0,"",python,selection_command
|
| 143 |
+
142,107875,"train_tokenizer.py",7116,0,"",python,selection_command
|
| 144 |
+
143,107896,"train_tokenizer.py",7071,0,"",python,selection_command
|
| 145 |
+
144,107939,"train_tokenizer.py",7058,0,"",python,selection_command
|
| 146 |
+
145,107964,"train_tokenizer.py",7024,0,"",python,selection_command
|
| 147 |
+
146,109973,"train_tokenizer.py",0,0,"",python,tab
|
| 148 |
+
147,109976,"train_tokenizer.py",1438,0,"",python,selection_command
|
| 149 |
+
148,119550,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be 
divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = sorted(\n [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n )\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n 
wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
| 150 |
+
149,119551,"train_tokenizer.py",7012,0,"",python,selection_mouse
|
| 151 |
+
150,119564,"train_tokenizer.py",7011,0,"",python,selection_command
|
| 152 |
+
151,121459,"train_tokenizer.py",0,0,"",python,tab
|
| 153 |
+
152,121459,"train_tokenizer.py",7341,0,"",python,selection_mouse
|
| 154 |
+
153,121994,"train_tokenizer.py",7402,0,"\n ",python,content
|
| 155 |
+
154,122073,"train_tokenizer.py",7403,12,"",python,content
|
| 156 |
+
155,122161,"train_tokenizer.py",7403,0,"\n print(f""Step {step}, loss: {loss}"")",python,content
|
| 157 |
+
156,122186,"train_tokenizer.py",7416,0,"",python,selection_command
|
| 158 |
+
157,122520,"train_tokenizer.py",7403,0,"",python,selection_command
|
| 159 |
+
158,122857,"train_tokenizer.py",7403,1,"",python,content
|
| 160 |
+
159,122864,"train_tokenizer.py",7415,0,"",python,selection_command
|
| 161 |
+
160,123914,"train_tokenizer.py",7335,0,"",python,selection_command
|
| 162 |
+
161,125957,"train_tokenizer.py",0,0,"",python,tab
|
| 163 |
+
162,126804,"train_tokenizer.py",7058,0,"",python,selection_command
|
| 164 |
+
163,126945,"train_tokenizer.py",7071,0,"",python,selection_command
|
| 165 |
+
164,127208,"train_tokenizer.py",7116,0,"",python,selection_command
|
| 166 |
+
165,127246,"train_tokenizer.py",7184,0,"",python,selection_command
|
| 167 |
+
166,127259,"train_tokenizer.py",7198,0,"",python,selection_command
|
| 168 |
+
167,127300,"train_tokenizer.py",7271,0,"",python,selection_command
|
| 169 |
+
168,127332,"train_tokenizer.py",7284,0,"",python,selection_command
|
| 170 |
+
169,127355,"train_tokenizer.py",7335,0,"",python,selection_command
|
| 171 |
+
170,127547,"train_tokenizer.py",7415,0,"",python,selection_command
|
| 172 |
+
171,128816,"train_tokenizer.py",7335,0,"",python,selection_command
|
| 173 |
+
172,129954,"train_dynamics.py",0,0,"",python,tab
|
| 174 |
+
173,135187,"train_tokenizer.py",0,0,"",python,tab
|
| 175 |
+
174,135189,"train_tokenizer.py",1438,0,"",python,selection_command
|
| 176 |
+
175,163796,"train_tokenizer.py",1542,2164,"\n def tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n @jax.jit\n def train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n return tokenizer_loss_fn, train_step\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n\n",python,content
|
| 177 |
+
176,165840,"vscode.git.Git",0,0,"2025-06-25 00:35:32.896 [info] [main] Log level: Info\n2025-06-25 00:35:32.897 [info] [main] Validating found git in: ""git""\n2025-06-25 00:35:32.897 [info] [main] Using git ""2.43.5"" from ""git""\n2025-06-25 00:35:32.897 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [9ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [25ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --git-dir --git-common-dir [10ms]\n2025-06-25 00:35:32.897 [info] [Model][openRepository] Opened repository: /lustre/groups/haicu/workspace/franz.srambical/jafar\n2025-06-25 00:35:32.897 [info] > git config --get commit.template [27ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [13ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [5ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [20ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [18ms]\n2025-06-25 00:35:32.897 [info] > git status -z -uall [16ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [6ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [14ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [13ms]\n2025-06-25 00:35:32.897 [info] > git config --get commit.template [8ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [12ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [7ms]\n2025-06-25 00:35:32.897 [info] > git config --local branch.seeded-episode-sampling.vscode-merge-base [16ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [9ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/origin/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [955ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [957ms]\n2025-06-25 00:35:32.898 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [31ms]\n2025-06-25 00:35:32.907 [info] > git rev-parse --path-format=relative --show-toplevel [33ms]\n2025-06-25 00:35:32.911 [info] > git status -z -uall [27ms]\n2025-06-25 00:35:32.921 [info] > git diff --name-status -z --diff-filter=ADMR 9e8a3860d8270b2dbf00311f48a6f8bf621b1aca...refs/remotes/origin/seeded-episode-sampling [14ms]\n2025-06-25 00:35:32.921 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [28ms]\n2025-06-25 00:35:32.926 [info] > git rev-parse 
--show-toplevel [6ms]\n2025-06-25 00:35:32.946 [info] > git rev-parse --path-format=relative --show-toplevel [15ms]\n2025-06-25 00:35:32.955 [info] > git rev-parse --show-toplevel [4ms]\n2025-06-25 00:35:32.967 [info] > git rev-parse --path-format=relative --show-toplevel [6ms]\n2025-06-25 00:35:32.984 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-25 00:35:32.996 [info] > git rev-parse --path-format=relative --show-toplevel [6ms]\n2025-06-25 00:35:33.014 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-25 00:35:33.026 [info] > git rev-parse --path-format=relative --show-toplevel [5ms]\n2025-06-25 00:35:33.037 [info] > git rev-parse --show-toplevel [4ms]\n2025-06-25 00:35:33.052 [info] > git rev-parse --path-format=relative --show-toplevel [5ms]\n2025-06-25 00:35:33.055 [info] [Model][doInitialScan] Initial repository scan completed - repositories (1), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-25 00:35:33.339 [info] > git fetch [1579ms]\n2025-06-25 00:35:33.365 [info] > git config --get commit.template [16ms]\n2025-06-25 00:35:33.373 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [18ms]\n2025-06-25 00:35:33.491 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [118ms]\n2025-06-25 00:35:33.537 [info] > git check-ignore -v -z --stdin [53ms]\n2025-06-25 00:35:33.539 [info] > git show --textconv :train_lam.py [39ms]\n2025-06-25 00:35:33.540 [info] > git status -z -uall [27ms]\n2025-06-25 00:35:33.542 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_lam.py [35ms]\n2025-06-25 00:35:33.554 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [33ms]\n2025-06-25 00:35:33.682 [info] > git cat-file -s 146d6b28f77470c22c6c6e602314c0eab8b3ddc7 [130ms]\n2025-06-25 00:35:34.083 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_lam.py [73ms]\n2025-06-25 00:35:36.240 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [7ms]\n2025-06-25 00:35:36.241 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:35:38.986 [info] > git config --global user.name [5ms]\n2025-06-25 00:35:38.996 [info] > git config --global user.email [3ms]\n2025-06-25 00:35:38.996 [info] [main] Stored git author name in global state: Franz Srambical <franz.srambical@gmail.com>\n2025-06-25 00:36:17.120 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [67ms]\n2025-06-25 00:36:18.319 [info] > git show --textconv HEAD:train_lam.py [12ms]\n2025-06-25 00:36:18.326 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_lam.py [11ms]\n2025-06-25 00:36:20.578 [info] > git show --textconv HEAD:train_dynamics.py [13ms]\n2025-06-25 00:36:20.585 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_dynamics.py [11ms]\n2025-06-25 00:36:20.930 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_dynamics.py [19ms]\n2025-06-25 00:36:22.669 [info] > git show --textconv HEAD:train_tokenizer.py [13ms]\n2025-06-25 00:36:22.675 
[info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:36:23.042 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [22ms]\n2025-06-25 00:36:42.950 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:36:46.390 [info] > git show --textconv :train_tokenizer.py [12ms]\n2025-06-25 00:36:46.396 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:36:46.410 [info] > git cat-file -s 78a39cc762967d4e4d8d4b77d06436da1d5dfcab [7ms]\n2025-06-25 00:37:07.312 [info] > git config --get commit.template [11ms]\n2025-06-25 00:37:07.324 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:37:07.352 [info] > git status -z -uall [16ms]\n2025-06-25 00:37:07.368 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:37:20.452 [info] > git config --get commit.template [11ms]\n2025-06-25 00:37:20.466 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:37:20.490 [info] > git status -z -uall [15ms]\n2025-06-25 00:37:20.502 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [21ms]\n2025-06-25 00:37:22.734 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:37:32.415 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [25ms]\n2025-06-25 00:37:39.144 [info] > git show --textconv :train_tokenizer.py [11ms]\n2025-06-25 00:37:39.150 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [7ms]\n2025-06-25 00:37:39.163 [info] > git cat-file -s 78a39cc762967d4e4d8d4b77d06436da1d5dfcab [5ms]\n2025-06-25 00:37:43.191 [info] > git show --textconv :train_dynamics.py [25ms]\n2025-06-25 00:37:43.207 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_dynamics.py [29ms]\n2025-06-25 00:37:43.222 [info] > git cat-file -s dbc227d8fd3182f9fee05ea8e52a1069189bde1d [6ms]\n2025-06-25 00:37:43.816 [info] > git config --get commit.template [10ms]\n2025-06-25 00:37:43.830 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:37:43.851 [info] > git status -z -uall [13ms]\n2025-06-25 00:37:43.872 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n2025-06-25 00:37:45.271 [info] > git config --get commit.template [11ms]\n2025-06-25 00:37:45.283 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling 
refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:37:45.306 [info] > git status -z -uall [14ms]\n2025-06-25 00:37:45.321 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:37:46.652 [info] > git show --textconv HEAD:train_tokenizer.py [13ms]\n2025-06-25 00:37:46.661 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [13ms]\n2025-06-25 00:37:56.987 [info] > git add -A -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [14ms]\n2025-06-25 00:37:57.008 [info] > git config --get commit.template [8ms]\n2025-06-25 00:37:57.023 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:37:57.049 [info] > git status -z -uall [16ms]\n2025-06-25 00:37:57.062 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [21ms]\n2025-06-25 00:37:57.289 [info] > git diff --no-color [19ms]\n2025-06-25 00:37:57.766 [info] > git show --textconv HEAD:train_tokenizer.py [20ms]\n2025-06-25 00:37:57.767 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:37:58.253 [info] > git config --get commit.template [10ms]\n2025-06-25 00:37:58.267 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:37:58.294 [info] > git status -z -uall [15ms]\n2025-06-25 00:37:58.309 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:37:58.542 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:37:58.553 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:37:58.560 [info] > git cat-file -s 08294a61b35de2a5fa21c8e79aea18283358ceb6 [9ms]\n2025-06-25 00:37:58.760 [info] > git show --textconv HEAD:train_tokenizer.py [19ms]\n2025-06-25 00:37:58.769 [info] > git show --textconv :train_tokenizer.py [9ms]\n2025-06-25 00:38:03.351 [info] > git config --get commit.template [24ms]\n2025-06-25 00:38:03.354 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:38:03.381 [info] > git status -z -uall [18ms]\n2025-06-25 00:38:03.389 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [18ms]\n2025-06-25 00:38:15.274 [info] > git -c user.useConfigOnly=true commit --quiet --allow-empty-message --file - [1067ms]\n2025-06-25 00:38:15.274 [info] [WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804694-3444258.\nblack....................................................................Failed\n- hook id: black\n- files were modified by this hook\n\nreformatted train_tokenizer.py\n\nAll done! 
✨ 🍰 ✨\n1 file reformatted.\n\n[INFO] Restored changes from /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804694-3444258.\n2025-06-25 00:38:15.291 [info] > git config --get-all user.name [7ms]\n2025-06-25 00:38:15.303 [info] > git config --get-all user.email [5ms]\n2025-06-25 00:38:15.321 [info] > git config --get commit.template [9ms]\n2025-06-25 00:38:15.335 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:38:15.362 [info] > git status -z -uall [18ms]\n2025-06-25 00:38:15.376 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n2025-06-25 00:38:16.289 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:38:16.296 [info] > git diff --no-color [29ms]\n2025-06-25 00:38:16.299 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:38:16.303 [info] > git cat-file -s 08294a61b35de2a5fa21c8e79aea18283358ceb6 [8ms]\n2025-06-25 00:38:16.601 [info] > git show --textconv HEAD:train_tokenizer.py [12ms]\n2025-06-25 00:38:16.608 [info] > git show --textconv :train_tokenizer.py [10ms]\n2025-06-25 00:38:17.293 [info] > git config --get commit.template [12ms]\n2025-06-25 00:38:17.308 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:38:17.345 [info] > git status -z -uall [26ms]\n2025-06-25 00:38:17.346 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [8ms]\n2025-06-25 00:38:17.355 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:38:17.356 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [28ms]\n2025-06-25 00:38:17.363 [info] > git cat-file -s 08294a61b35de2a5fa21c8e79aea18283358ceb6 [9ms]\n2025-06-25 00:38:17.567 [info] > git show --textconv HEAD:train_tokenizer.py [13ms]\n2025-06-25 00:38:17.577 [info] > git show --textconv :train_tokenizer.py [11ms]\n",log,tab
|
| 178 |
+
177,168360,"vscode.git.Git",16332,0,"2025-06-25 00:38:20.780 [info] > git add -A -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [32ms]\n2025-06-25 00:38:20.802 [info] > git config --get commit.template [10ms]\n2025-06-25 00:38:20.817 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:38:20.848 [info] > git status -z -uall [19ms]\n2025-06-25 00:38:20.859 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [21ms]\n",log,content
|
| 179 |
+
178,168546,"vscode.git.Git",16973,0,"2025-06-25 00:38:21.077 [info] > git diff --no-color [15ms]\n",log,content
|
| 180 |
+
179,169868,"vscode.git.Git",17033,0,"2025-06-25 00:38:22.320 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:38:22.331 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:38:22.341 [info] > git cat-file -s e0b223487cd0fd5e6b69aae9a58441a50592c4db [10ms]\n2025-06-25 00:38:22.379 [info] > git config --get commit.template [10ms]\n2025-06-25 00:38:22.392 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:38:22.419 [info] > git status -z -uall [17ms]\n2025-06-25 00:38:22.434 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n",log,content
|
| 181 |
+
180,170093,"vscode.git.Git",17916,0,"2025-06-25 00:38:22.531 [info] > git show --textconv HEAD:train_tokenizer.py [11ms]\n2025-06-25 00:38:22.539 [info] > git show --textconv :train_tokenizer.py [9ms]\n",log,content
|
| 182 |
+
181,171073,"vscode.git.Git",18079,0,"2025-06-25 00:38:23.538 [info] > git -c user.useConfigOnly=true commit --quiet --allow-empty-message --file - [739ms]\n2025-06-25 00:38:23.538 [info] [WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804703-3444332.\nblack....................................................................Passed\n[INFO] Restored changes from /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804703-3444332.\n2025-06-25 00:38:23.555 [info] > git config --get commit.template [6ms]\n2025-06-25 00:38:23.576 [info] > git config --get commit.template [10ms]\n2025-06-25 00:38:23.592 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [17ms]\n2025-06-25 00:38:23.622 [info] > git status -z -uall [18ms]\n2025-06-25 00:38:23.637 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [24ms]\n2025-06-25 00:38:23.664 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [12ms]\n2025-06-25 00:38:23.684 [info] > git diff --name-status -z --diff-filter=ADMR 9e8a3860d8270b2dbf00311f48a6f8bf621b1aca...refs/remotes/origin/seeded-episode-sampling [12ms]\n",log,content
|
| 183 |
+
182,171252,"vscode.git.Git",19455,0,"2025-06-25 00:38:23.889 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [11ms]\n2025-06-25 00:38:23.889 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:38:23.890 [info] > git diff --no-color [20ms]\n",log,content
|
| 184 |
+
183,171440,"vscode.git.Git",19717,0,"2025-06-25 00:38:23.900 [info] > git config --get commit.template [11ms]\n2025-06-25 00:38:23.913 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:38:23.929 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [67ms]\n2025-06-25 00:38:23.931 [info] > git status -z -uall [12ms]\n2025-06-25 00:38:23.950 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n",log,content
|
| 185 |
+
184,172680,"vscode.git.Git",20417,0,"2025-06-25 00:38:25.145 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:38:25.153 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n",log,content
|
| 186 |
+
185,172860,"vscode.git.Git",20687,0,"2025-06-25 00:38:25.349 [info] > git show --textconv HEAD:train_tokenizer.py [17ms]\n2025-06-25 00:38:25.360 [info] > git show --textconv HEAD:train_tokenizer.py [12ms]\n",log,content
|
| 187 |
+
186,173049,"train_tokenizer.py",0,0,"",python,tab
|
| 188 |
+
187,173310,"train_dynamics.py",0,0,"",python,tab
|
| 189 |
+
188,173311,"train_dynamics.py",6425,0,"",python,selection_command
|
| 190 |
+
189,174394,"train_lam.py",0,0,"",python,tab
|
| 191 |
+
190,174395,"train_lam.py",6991,0,"",python,selection_command
|
| 192 |
+
191,175731,"utils/nn.py",0,0,"import math\nfrom typing import Dict, Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n deterministic=False,\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n deterministic=False,\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm()(x)\n z = nn.Dense(self.dim)(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(),\n nn.Dense(self.model_dim),\n nn.LayerNorm(),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n )(x)\n x = nn.Dense(self.out_dim)(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(self, x: jax.Array, training: bool) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n distance = self.drop(distance)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n",python,tab
|
| 193 |
+
192,175732,"utils/nn.py",1420,0,"",python,selection_command
|
| 194 |
+
193,204437,"utils/nn.py",3209,107," def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n",python,content
|
| 195 |
+
194,206308,"vscode.git.Git",0,0,"",log,tab
|
| 196 |
+
195,209498,"vscode.git.Git",28469,0,"2025-06-25 00:39:01.714 [info] > git config --get commit.template [10ms]\n2025-06-25 00:39:01.729 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:39:01.757 [info] > git status -z -uall [17ms]\n2025-06-25 00:39:01.773 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n",log,content
|
| 197 |
+
196,209889,"vscode.git.Git",28984,0,"2025-06-25 00:39:02.353 [info] > git add -A -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [15ms]\n2025-06-25 00:39:02.373 [info] > git config --get commit.template [9ms]\n2025-06-25 00:39:02.390 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [17ms]\n2025-06-25 00:39:02.417 [info] > git status -z -uall [18ms]\n2025-06-25 00:39:02.431 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [24ms]\n",log,content
|
| 198 |
+
197,210096,"vscode.git.Git",29617,0,"2025-06-25 00:39:02.656 [info] > git diff --no-color [16ms]\n",log,content
|
| 199 |
+
198,211618,"vscode.git.Git",29677,0,"2025-06-25 00:39:03.936 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [13ms]\n2025-06-25 00:39:03.946 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [15ms]\n2025-06-25 00:39:03.955 [info] > git cat-file -s 2b106612ba0c4615ee46876eff4e52dcf8d121aa [10ms]\n",log,content
|
| 200 |
+
199,211837,"vscode.git.Git",30031,0,"2025-06-25 00:39:04.173 [info] > git show --textconv HEAD:utils/nn.py [11ms]\n2025-06-25 00:39:04.183 [info] > git show --textconv :utils/nn.py [10ms]\n",log,content
|
| 201 |
+
200,212529,"vscode.git.Git",30181,0,"2025-06-25 00:39:04.912 [info] > git -c user.useConfigOnly=true commit --quiet --allow-empty-message --file - [767ms]\n2025-06-25 00:39:04.912 [info] [WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804744-3445150.\nblack....................................................................Passed\n[INFO] Restored changes from /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804744-3445150.\n2025-06-25 00:39:04.927 [info] > git config --get commit.template [7ms]\n2025-06-25 00:39:04.941 [info] > git config --get commit.template [6ms]\n2025-06-25 00:39:04.958 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:39:04.984 [info] > git status -z -uall [17ms]\n2025-06-25 00:39:04.991 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [19ms]\n2025-06-25 00:39:05.007 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [8ms]\n2025-06-25 00:39:05.024 [info] > git diff --name-status -z --diff-filter=ADMR 9e8a3860d8270b2dbf00311f48a6f8bf621b1aca...refs/remotes/origin/seeded-episode-sampling [10ms]\n",log,content
|
| 202 |
+
201,212895,"vscode.git.Git",31555,0,"2025-06-25 00:39:05.347 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [12ms]\n2025-06-25 00:39:05.347 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:39:05.348 [info] > git diff --no-color [21ms]\n2025-06-25 00:39:05.356 [info] > git config --get commit.template [9ms]\n2025-06-25 00:39:05.370 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:39:05.397 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [78ms]\n2025-06-25 00:39:05.400 [info] > git status -z -uall [19ms]\n2025-06-25 00:39:05.415 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n",log,content
|
| 203 |
+
202,214221,"vscode.git.Git",32516,0,"2025-06-25 00:39:06.641 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [10ms]\n2025-06-25 00:39:06.649 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [9ms]\n2025-06-25 00:39:06.797 [info] > git config --get commit.template [10ms]\n2025-06-25 00:39:06.812 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:39:06.837 [info] > git status -z -uall [15ms]\n",log,content
|
| 204 |
+
203,214432,"vscode.git.Git",33158,0,"2025-06-25 00:39:06.848 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [20ms]\n2025-06-25 00:39:06.861 [info] > git show --textconv HEAD:utils/nn.py [14ms]\n2025-06-25 00:39:06.894 [info] > git show --textconv HEAD:utils/nn.py [13ms]\n",log,content
|
| 205 |
+
204,219260,"vscode.git.Git",33440,0,"2025-06-25 00:39:11.695 [info] > git push origin seeded-episode-sampling:seeded-episode-sampling [1503ms]\n2025-06-25 00:39:11.695 [info] To github.com:p-doom/jafar.git\n 9e8a386..917a739 seeded-episode-sampling -> seeded-episode-sampling\n2025-06-25 00:39:11.719 [info] > git config --get commit.template [12ms]\n2025-06-25 00:39:11.730 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [12ms]\n2025-06-25 00:39:11.750 [info] > git status -z -uall [13ms]\n2025-06-25 00:39:11.770 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n2025-06-25 00:39:11.796 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [11ms]\n2025-06-25 00:39:11.817 [info] > git diff --name-status -z --diff-filter=ADMR 917a7397f06657ab25fa0612b8c9f8e00aefbf33...refs/remotes/origin/seeded-episode-sampling [11ms]\n2025-06-25 00:39:11.874 [info] > git config --get commit.template [12ms]\n2025-06-25 00:39:11.882 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [11ms]\n",log,content
|
| 206 |
+
205,219881,"vscode.git.Git",34828,0,"2025-06-25 00:39:11.908 [info] > git status -z -uall [15ms]\n2025-06-25 00:39:11.924 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:39:12.015 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [4ms]\n2025-06-25 00:39:12.015 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:39:12.016 [info] > git diff --no-color [12ms]\n2025-06-25 00:39:12.083 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [84ms]\n",log,content
|
| 207 |
+
206,220315,"utils/nn.py",0,0,"",python,tab
|
| 208 |
+
207,221950,"TERMINAL",0,0,"^C",,terminal_command
|
| 209 |
+
208,221951,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;4bd28968-9983-490f-a4cb-77f4e120a232]633;C]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D",,terminal_output
|
| 210 |
+
209,222128,"TERMINAL",0,0,"^C",,terminal_command
|
| 211 |
+
210,222129,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;4bd28968-9983-490f-a4cb-77f4e120a232]633;C]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D",,terminal_output
|
| 212 |
+
211,223240,"TERMINAL",0,0,"squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14",,terminal_command
|
| 213 |
+
212,223241,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14;4bd28968-9983-490f-a4cb-77f4e120a232]633;C",,terminal_output
|
| 214 |
+
213,273141,"utils/nn.py",1484,0,"",python,selection_mouse
|
| 215 |
+
214,658929,"TERMINAL",0,0,"slurm_load_node error: Unable to contact slurm controller (connect failure)\r\n]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;1",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-12014243-c6dd-4552-b068-b295ea888a8c1764864769359-2025_12_04-17.12.58.257/source.csv
ADDED
|
@@ -0,0 +1,266 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,74,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,479,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:12:58 PM [info] Activating crowd-code\n5:12:58 PM [info] Recording started\n5:12:58 PM [info] Initializing git provider using file system watchers...\n5:12:58 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,2113,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
4,6885,"Untitled-1",0,0,"f",plaintext,content
|
| 6 |
+
5,6888,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 7 |
+
6,6921,"Untitled-1",1,0,"o",plaintext,content
|
| 8 |
+
7,6923,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 9 |
+
8,7003,"Untitled-1",2,0,"r",plaintext,content
|
| 10 |
+
9,7005,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 11 |
+
10,7119,"Untitled-1",3,0," ",plaintext,content
|
| 12 |
+
11,7122,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 13 |
+
12,8855,"Untitled-1",4,0,"i",plaintext,content
|
| 14 |
+
13,8857,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 15 |
+
14,9180,"Untitled-1",5,0," ",plaintext,content
|
| 16 |
+
15,9181,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 17 |
+
16,9185,"Untitled-1",6,0,"i",plaintext,content
|
| 18 |
+
17,9187,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 19 |
+
18,9189,"Untitled-1",7,0,"n",plaintext,content
|
| 20 |
+
19,9191,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 21 |
+
20,9322,"Untitled-1",8,0," ",plaintext,content
|
| 22 |
+
21,9325,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 23 |
+
22,9589,"Untitled-1",9,0,"r",plaintext,content
|
| 24 |
+
23,9591,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 25 |
+
24,9714,"Untitled-1",10,0,"a",plaintext,content
|
| 26 |
+
25,9715,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 27 |
+
26,9781,"Untitled-1",11,0,"n",plaintext,content
|
| 28 |
+
27,9783,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 29 |
+
28,9967,"Untitled-1",12,0,"g",plaintext,content
|
| 30 |
+
29,9968,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 31 |
+
30,9970,"Untitled-1",13,0,"e",plaintext,content
|
| 32 |
+
31,9971,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 33 |
+
32,14014,"Untitled-1",13,0,"",plaintext,selection_command
|
| 34 |
+
33,14418,"Untitled-1",0,14,"",plaintext,content
|
| 35 |
+
34,16660,"Untitled-1",0,0,"""",plaintext,content
|
| 36 |
+
35,16662,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 37 |
+
36,16666,"Untitled-1",1,0,"""",plaintext,content
|
| 38 |
+
37,16668,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 39 |
+
38,16832,"Untitled-1",2,0,"""",plaintext,content
|
| 40 |
+
39,16836,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 41 |
+
40,17071,"Untitled-1",3,0,"h",plaintext,content
|
| 42 |
+
41,17073,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 43 |
+
42,17136,"Untitled-1",4,0,"e",plaintext,content
|
| 44 |
+
43,17138,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 45 |
+
44,17305,"Untitled-1",5,0,"l",plaintext,content
|
| 46 |
+
45,17307,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 47 |
+
46,17446,"Untitled-1",6,0,"l",plaintext,content
|
| 48 |
+
47,17447,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 49 |
+
48,17509,"Untitled-1",7,0,"o",plaintext,content
|
| 50 |
+
49,17510,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 51 |
+
50,17719,"Untitled-1",8,0," ",plaintext,content
|
| 52 |
+
51,17720,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 53 |
+
52,17871,"Untitled-1",9,0,"w",plaintext,content
|
| 54 |
+
53,17873,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 55 |
+
54,17959,"Untitled-1",10,0,"r",plaintext,content
|
| 56 |
+
55,17960,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 57 |
+
56,18131,"Untitled-1",11,0,"o",plaintext,content
|
| 58 |
+
57,18133,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 59 |
+
58,18428,"Untitled-1",12,0,"l",plaintext,content
|
| 60 |
+
59,18430,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 61 |
+
60,18463,"Untitled-1",13,0,"d",plaintext,content
|
| 62 |
+
61,18465,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 63 |
+
62,18546,"Untitled-1",14,0," ",plaintext,content
|
| 64 |
+
63,18548,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 65 |
+
64,18911,"Untitled-1",9,6,"",plaintext,content
|
| 66 |
+
65,19028,"Untitled-1",9,0,"w",plaintext,content
|
| 67 |
+
66,19030,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 68 |
+
67,19093,"Untitled-1",10,0,"o",plaintext,content
|
| 69 |
+
68,19095,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 70 |
+
69,19171,"Untitled-1",11,0,"r",plaintext,content
|
| 71 |
+
70,19173,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 72 |
+
71,19345,"Untitled-1",12,0,"l",plaintext,content
|
| 73 |
+
72,19348,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 74 |
+
73,19435,"Untitled-1",13,0,"d",plaintext,content
|
| 75 |
+
74,19438,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 76 |
+
75,19548,"Untitled-1",14,0," ",plaintext,content
|
| 77 |
+
76,19550,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 78 |
+
77,19656,"Untitled-1",15,0,"f",plaintext,content
|
| 79 |
+
78,19658,"Untitled-1",16,0,"",plaintext,selection_keyboard
|
| 80 |
+
79,19661,"Untitled-1",16,0,"u",plaintext,content
|
| 81 |
+
80,19662,"Untitled-1",17,0,"",plaintext,selection_keyboard
|
| 82 |
+
81,19791,"Untitled-1",17,0,"n",plaintext,content
|
| 83 |
+
82,19793,"Untitled-1",18,0,"",plaintext,selection_keyboard
|
| 84 |
+
83,19819,"Untitled-1",18,0,"c",plaintext,content
|
| 85 |
+
84,19821,"Untitled-1",19,0,"",plaintext,selection_keyboard
|
| 86 |
+
85,19997,"Untitled-1",19,0,"t",plaintext,content
|
| 87 |
+
86,19999,"Untitled-1",20,0,"",plaintext,selection_keyboard
|
| 88 |
+
87,20087,"Untitled-1",20,0,"i",plaintext,content
|
| 89 |
+
88,20089,"Untitled-1",21,0,"",plaintext,selection_keyboard
|
| 90 |
+
89,20092,"Untitled-1",21,0,"o",plaintext,content
|
| 91 |
+
90,20093,"Untitled-1",22,0,"",plaintext,selection_keyboard
|
| 92 |
+
91,20149,"Untitled-1",22,0,"n",plaintext,content
|
| 93 |
+
92,20150,"Untitled-1",23,0,"",plaintext,selection_keyboard
|
| 94 |
+
93,20547,"Untitled-1",23,0,"""",plaintext,content
|
| 95 |
+
94,20549,"Untitled-1",24,0,"",plaintext,selection_keyboard
|
| 96 |
+
95,20663,"Untitled-1",24,0,"""",plaintext,content
|
| 97 |
+
96,20665,"Untitled-1",25,0,"",plaintext,selection_keyboard
|
| 98 |
+
97,20836,"Untitled-1",25,0,"""",plaintext,content
|
| 99 |
+
98,20838,"Untitled-1",26,0,"",plaintext,selection_keyboard
|
| 100 |
+
99,21110,"Untitled-1",26,0,"\n",plaintext,content
|
| 101 |
+
100,21606,"Untitled-1",27,0,"d",plaintext,content
|
| 102 |
+
101,21608,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 103 |
+
102,21723,"Untitled-1",28,0,"e",plaintext,content
|
| 104 |
+
103,21725,"Untitled-1",29,0,"",plaintext,selection_keyboard
|
| 105 |
+
104,21864,"Untitled-1",29,0,"f",plaintext,content
|
| 106 |
+
105,21867,"Untitled-1",30,0,"",plaintext,selection_keyboard
|
| 107 |
+
106,22043,"Untitled-1",30,0," ",plaintext,content
|
| 108 |
+
107,22045,"Untitled-1",31,0,"",plaintext,selection_keyboard
|
| 109 |
+
108,27351,"Untitled-1",31,0,"h",plaintext,content
|
| 110 |
+
109,27353,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 111 |
+
110,27460,"Untitled-1",32,0,"e",plaintext,content
|
| 112 |
+
111,27462,"Untitled-1",33,0,"",plaintext,selection_keyboard
|
| 113 |
+
112,27618,"Untitled-1",33,0,"l",plaintext,content
|
| 114 |
+
113,27620,"Untitled-1",34,0,"",plaintext,selection_keyboard
|
| 115 |
+
114,27744,"Untitled-1",34,0,"l",plaintext,content
|
| 116 |
+
115,27746,"Untitled-1",35,0,"",plaintext,selection_keyboard
|
| 117 |
+
116,27852,"Untitled-1",35,0,"o",plaintext,content
|
| 118 |
+
117,27854,"Untitled-1",36,0,"",plaintext,selection_keyboard
|
| 119 |
+
118,28318,"Untitled-1",36,0,"_",plaintext,content
|
| 120 |
+
119,28320,"Untitled-1",37,0,"",plaintext,selection_keyboard
|
| 121 |
+
120,28466,"Untitled-1",37,0,"w",plaintext,content
|
| 122 |
+
121,28467,"Untitled-1",38,0,"",plaintext,selection_keyboard
|
| 123 |
+
122,28572,"Untitled-1",38,0,"o",plaintext,content
|
| 124 |
+
123,28574,"Untitled-1",39,0,"",plaintext,selection_keyboard
|
| 125 |
+
124,28645,"Untitled-1",39,0,"r",plaintext,content
|
| 126 |
+
125,28647,"Untitled-1",40,0,"",plaintext,selection_keyboard
|
| 127 |
+
126,28832,"Untitled-1",40,0,"l",plaintext,content
|
| 128 |
+
127,28834,"Untitled-1",41,0,"",plaintext,selection_keyboard
|
| 129 |
+
128,28906,"Untitled-1",41,0,"d",plaintext,content
|
| 130 |
+
129,28907,"Untitled-1",42,0,"",plaintext,selection_keyboard
|
| 131 |
+
130,29156,"Untitled-1",42,0,"()",plaintext,content
|
| 132 |
+
131,29158,"Untitled-1",43,0,"",plaintext,selection_keyboard
|
| 133 |
+
132,29253,"Untitled-1",43,1,")",plaintext,content
|
| 134 |
+
133,29255,"Untitled-1",44,0,"",plaintext,selection_keyboard
|
| 135 |
+
134,29501,"Untitled-1",44,0,"\n",plaintext,content
|
| 136 |
+
135,120719,"Untitled-1",40,0,":",plaintext,content
|
| 137 |
+
136,122508,"Untitled-1",45,1,"",plaintext,content
|
| 138 |
+
137,124263,"Untitled-1",40,1,"",plaintext,content
|
| 139 |
+
138,124472,"Untitled-1",39,0,"",plaintext,selection_command
|
| 140 |
+
139,125355,"Untitled-1",44,0,"",plaintext,selection_command
|
| 141 |
+
140,125753,"Untitled-1",44,0,":",plaintext,content
|
| 142 |
+
141,125754,"Untitled-1",45,0,"",plaintext,selection_keyboard
|
| 143 |
+
142,125971,"Untitled-1",45,0,"\n",plaintext,content
|
| 144 |
+
143,130372,"Untitled-1",46,0," print(""hello world"")",plaintext,content
|
| 145 |
+
144,131908,"Untitled-1",69,0,"",plaintext,selection_command
|
| 146 |
+
145,132465,"Untitled-1",44,0,"",plaintext,selection_command
|
| 147 |
+
146,132568,"Untitled-1",23,0,"",plaintext,selection_command
|
| 148 |
+
147,132986,"Untitled-1",0,26,"""""""hello world function""""""",plaintext,selection_command
|
| 149 |
+
148,133188,"Untitled-1",0,45,"""""""hello world function""""""\ndef hello_world():",plaintext,selection_command
|
| 150 |
+
149,133321,"Untitled-1",0,70,"""""""hello world function""""""\ndef hello_world():\n print(""hello world"")",plaintext,selection_command
|
| 151 |
+
150,135894,"Untitled-1",0,70,"",plaintext,content
|
| 152 |
+
151,136272,"Untitled-1",0,0,"""",plaintext,content
|
| 153 |
+
152,136274,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 154 |
+
153,136422,"Untitled-1",1,0,"""",plaintext,content
|
| 155 |
+
154,136423,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 156 |
+
155,136553,"Untitled-1",2,0,"""",plaintext,content
|
| 157 |
+
156,136555,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 158 |
+
157,137455,"Untitled-1",2,0,"",plaintext,selection_command
|
| 159 |
+
158,138832,"Untitled-1",3,0,"hello world function""""""\ndef hello_world():\n print(""hello world"")",plaintext,content
|
| 160 |
+
159,138835,"Untitled-1",23,0,"",plaintext,selection_command
|
| 161 |
+
160,139275,"Untitled-1",44,0,"",plaintext,selection_command
|
| 162 |
+
161,139400,"Untitled-1",69,0,"",plaintext,selection_command
|
| 163 |
+
162,139600,"Untitled-1",44,0,"",plaintext,selection_command
|
| 164 |
+
163,140091,"Untitled-1",45,0,"\n",plaintext,content
|
| 165 |
+
164,141003,"Untitled-1",46,0," ",plaintext,content
|
| 166 |
+
165,141700,"Untitled-1",50,0,"r",plaintext,content
|
| 167 |
+
166,141702,"Untitled-1",51,0,"",plaintext,selection_keyboard
|
| 168 |
+
167,143609,"Untitled-1",51,0,"a",plaintext,content
|
| 169 |
+
168,143611,"Untitled-1",52,0,"",plaintext,selection_keyboard
|
| 170 |
+
169,143650,"Untitled-1",52,0,"i",plaintext,content
|
| 171 |
+
170,143652,"Untitled-1",53,0,"",plaintext,selection_keyboard
|
| 172 |
+
171,143851,"Untitled-1",53,0,"s",plaintext,content
|
| 173 |
+
172,143852,"Untitled-1",54,0,"",plaintext,selection_keyboard
|
| 174 |
+
173,143867,"Untitled-1",54,0,"e",plaintext,content
|
| 175 |
+
174,143869,"Untitled-1",55,0,"",plaintext,selection_keyboard
|
| 176 |
+
175,143935,"Untitled-1",55,0," ",plaintext,content
|
| 177 |
+
176,143937,"Untitled-1",56,0,"",plaintext,selection_keyboard
|
| 178 |
+
177,144249,"Untitled-1",56,0,"V",plaintext,content
|
| 179 |
+
178,144251,"Untitled-1",57,0,"",plaintext,selection_keyboard
|
| 180 |
+
179,144545,"Untitled-1",57,0,"a",plaintext,content
|
| 181 |
+
180,144546,"Untitled-1",58,0,"",plaintext,selection_keyboard
|
| 182 |
+
181,144549,"Untitled-1",58,0,"l",plaintext,content
|
| 183 |
+
182,144550,"Untitled-1",59,0,"",plaintext,selection_keyboard
|
| 184 |
+
183,144552,"Untitled-1",59,0,"u",plaintext,content
|
| 185 |
+
184,144553,"Untitled-1",60,0,"",plaintext,selection_keyboard
|
| 186 |
+
185,144561,"Untitled-1",60,0,"e",plaintext,content
|
| 187 |
+
186,144562,"Untitled-1",61,0,"",plaintext,selection_keyboard
|
| 188 |
+
187,144943,"Untitled-1",61,0,"E",plaintext,content
|
| 189 |
+
188,144944,"Untitled-1",62,0,"",plaintext,selection_keyboard
|
| 190 |
+
189,145053,"Untitled-1",62,0,"r",plaintext,content
|
| 191 |
+
190,145054,"Untitled-1",63,0,"",plaintext,selection_keyboard
|
| 192 |
+
191,145220,"Untitled-1",63,0,"r",plaintext,content
|
| 193 |
+
192,145222,"Untitled-1",64,0,"",plaintext,selection_keyboard
|
| 194 |
+
193,145309,"Untitled-1",64,0,"o",plaintext,content
|
| 195 |
+
194,145311,"Untitled-1",65,0,"",plaintext,selection_keyboard
|
| 196 |
+
195,145403,"Untitled-1",65,0,"r",plaintext,content
|
| 197 |
+
196,145404,"Untitled-1",66,0,"",plaintext,selection_keyboard
|
| 198 |
+
197,145697,"Untitled-1",66,0,"()",plaintext,content
|
| 199 |
+
198,145699,"Untitled-1",67,0,"",plaintext,selection_keyboard
|
| 200 |
+
199,145856,"Untitled-1",67,1,")",plaintext,content
|
| 201 |
+
200,145858,"Untitled-1",68,0,"",plaintext,selection_keyboard
|
| 202 |
+
201,146280,"Untitled-1",67,0,"",plaintext,selection_command
|
| 203 |
+
202,146566,"Untitled-1",67,0,"""",plaintext,content
|
| 204 |
+
203,146568,"Untitled-1",68,0,"",plaintext,selection_keyboard
|
| 205 |
+
204,146761,"Untitled-1",68,0,"t",plaintext,content
|
| 206 |
+
205,146763,"Untitled-1",69,0,"",plaintext,selection_keyboard
|
| 207 |
+
206,146867,"Untitled-1",69,0,"h",plaintext,content
|
| 208 |
+
207,146869,"Untitled-1",70,0,"",plaintext,selection_keyboard
|
| 209 |
+
208,146926,"Untitled-1",70,0,"i",plaintext,content
|
| 210 |
+
209,146928,"Untitled-1",71,0,"",plaintext,selection_keyboard
|
| 211 |
+
210,147099,"Untitled-1",71,0,"s",plaintext,content
|
| 212 |
+
211,147101,"Untitled-1",72,0,"",plaintext,selection_keyboard
|
| 213 |
+
212,147172,"Untitled-1",72,0," ",plaintext,content
|
| 214 |
+
213,147174,"Untitled-1",73,0,"",plaintext,selection_keyboard
|
| 215 |
+
214,147533,"Untitled-1",73,0,"l",plaintext,content
|
| 216 |
+
215,147535,"Untitled-1",74,0,"",plaintext,selection_keyboard
|
| 217 |
+
216,147537,"Untitled-1",74,0,"i",plaintext,content
|
| 218 |
+
217,147538,"Untitled-1",75,0,"",plaintext,selection_keyboard
|
| 219 |
+
218,147541,"Untitled-1",75,0,"n",plaintext,content
|
| 220 |
+
219,147542,"Untitled-1",76,0,"",plaintext,selection_keyboard
|
| 221 |
+
220,147611,"Untitled-1",76,0,"e",plaintext,content
|
| 222 |
+
221,147613,"Untitled-1",77,0,"",plaintext,selection_keyboard
|
| 223 |
+
222,147801,"Untitled-1",77,0," ",plaintext,content
|
| 224 |
+
223,147804,"Untitled-1",78,0,"",plaintext,selection_keyboard
|
| 225 |
+
224,147881,"Untitled-1",78,0,"s",plaintext,content
|
| 226 |
+
225,147883,"Untitled-1",79,0,"",plaintext,selection_keyboard
|
| 227 |
+
226,147886,"Untitled-1",79,0,"h",plaintext,content
|
| 228 |
+
227,147887,"Untitled-1",80,0,"",plaintext,selection_keyboard
|
| 229 |
+
228,147914,"Untitled-1",80,0,"o",plaintext,content
|
| 230 |
+
229,147916,"Untitled-1",81,0,"",plaintext,selection_keyboard
|
| 231 |
+
230,148007,"Untitled-1",81,0,"u",plaintext,content
|
| 232 |
+
231,148009,"Untitled-1",82,0,"",plaintext,selection_keyboard
|
| 233 |
+
232,148219,"Untitled-1",82,0,"l",plaintext,content
|
| 234 |
+
233,148220,"Untitled-1",83,0,"",plaintext,selection_keyboard
|
| 235 |
+
234,148268,"Untitled-1",83,0,"d",plaintext,content
|
| 236 |
+
235,148269,"Untitled-1",84,0,"",plaintext,selection_keyboard
|
| 237 |
+
236,148345,"Untitled-1",84,0," ",plaintext,content
|
| 238 |
+
237,148347,"Untitled-1",85,0,"",plaintext,selection_keyboard
|
| 239 |
+
238,148499,"Untitled-1",85,0,"b",plaintext,content
|
| 240 |
+
239,148500,"Untitled-1",86,0,"",plaintext,selection_keyboard
|
| 241 |
+
240,148502,"Untitled-1",86,0,"e",plaintext,content
|
| 242 |
+
241,148503,"Untitled-1",87,0,"",plaintext,selection_keyboard
|
| 243 |
+
242,148631,"Untitled-1",87,0," ",plaintext,content
|
| 244 |
+
243,148632,"Untitled-1",88,0,"",plaintext,selection_keyboard
|
| 245 |
+
244,148738,"Untitled-1",88,0,"d",plaintext,content
|
| 246 |
+
245,148739,"Untitled-1",89,0,"",plaintext,selection_keyboard
|
| 247 |
+
246,148824,"Untitled-1",89,0,"e",plaintext,content
|
| 248 |
+
247,148825,"Untitled-1",90,0,"",plaintext,selection_keyboard
|
| 249 |
+
248,148967,"Untitled-1",90,0,"l",plaintext,content
|
| 250 |
+
249,148968,"Untitled-1",91,0,"",plaintext,selection_keyboard
|
| 251 |
+
250,148995,"Untitled-1",91,0,"e",plaintext,content
|
| 252 |
+
251,148997,"Untitled-1",92,0,"",plaintext,selection_keyboard
|
| 253 |
+
252,149102,"Untitled-1",92,0,"t",plaintext,content
|
| 254 |
+
253,149103,"Untitled-1",93,0,"",plaintext,selection_keyboard
|
| 255 |
+
254,149198,"Untitled-1",93,0,"e",plaintext,content
|
| 256 |
+
255,149200,"Untitled-1",94,0,"",plaintext,selection_keyboard
|
| 257 |
+
256,149413,"Untitled-1",94,0,"d",plaintext,content
|
| 258 |
+
257,149414,"Untitled-1",95,0,"",plaintext,selection_keyboard
|
| 259 |
+
258,149653,"Untitled-1",95,0,"""",plaintext,content
|
| 260 |
+
259,149655,"Untitled-1",96,0,"",plaintext,selection_keyboard
|
| 261 |
+
260,149924,"Untitled-1",95,0,"",plaintext,selection_command
|
| 262 |
+
261,150697,"Untitled-1",97,0,"",plaintext,selection_command
|
| 263 |
+
262,198225,"Untitled-1",46,48,"",plaintext,content
|
| 264 |
+
263,199079,"Untitled-1",48,0,"",plaintext,selection_command
|
| 265 |
+
264,199466,"Untitled-1",46,4,"",plaintext,content
|
| 266 |
+
265,199504,"Untitled-1",50,0,"",plaintext,selection_command
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-13a6759c-b987-4abe-95b3-2a670a8e33c11765778999540-2025_12_15-07.10.10.804/source.csv
ADDED
|
@@ -0,0 +1,28 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"crates/cli/src/main.rs",0,0,"//! CLI tool for serializing crowd-pilot IDE interaction data.\n//!\n//! This tool processes CSV session files and outputs JSONL format suitable for\n//! NeMo SFT training. It uses an embedded Python interpreter to load HuggingFace\n//! tokenizers for accurate token counting.\n\nuse std::path::PathBuf;\n\nuse clap::Parser;\nuse pyo3::prelude::*;\nuse pyo3::types::PyModule;\n\nuse crowd_pilot_serializer_core::{\n pipeline::{PipelineConfig, PipelineResult},\n process_all_sessions, write_jsonl_output, Tokenizer,\n};\n\n/// Serialize crowd-pilot CSV sessions to NeMo JSONL format.\n#[derive(Parser, Debug)]\n#[command(name = ""crowd-pilot-serialize"")]\n#[command(author, version, about, long_about = None)]\nstruct Args {\n /// Root directory containing CSV session files\n #[arg(long)]\n csv_root: PathBuf,\n\n /// Output directory for JSONL files\n #[arg(long)]\n output_dir: PathBuf,\n\n /// HuggingFace tokenizer model name or path\n #[arg(long)]\n tokenizer: String,\n\n /// Maximum tokens per conversation chunk\n #[arg(long, default_value = ""8192"")]\n max_tokens_per_conversation: usize,\n\n /// Maximum tokens per message\n #[arg(long, default_value = ""2048"")]\n max_tokens_per_message: usize,\n\n /// Minimum messages required to keep a conversation\n #[arg(long, default_value = ""5"")]\n min_conversation_messages: usize,\n\n /// Viewport radius (lines above/below cursor)\n #[arg(long, default_value = ""10"")]\n viewport_radius: usize,\n\n /// Coalesce radius for grouping nearby edits\n #[arg(long, default_value = ""5"")]\n coalesce_radius: usize,\n\n /// Fraction of sessions for validation (0.0-1.0)\n #[arg(long, default_value = ""0.1"")]\n val_ratio: f64,\n\n /// Custom system prompt (optional)\n #[arg(long)]\n system_prompt: Option<String>,\n}\n\nconst DEFAULT_SYSTEM_PROMPT: &str = r#""You are a helpful assistant that can interact multiple times with a computer shell to solve programming tasks.\nYour response must contain exactly ONE bash code block with ONE command (or commands connected with && or ||).\n\nFormat your response as shown in <format_example>.\n\n<format_example>\n```bash\nyour_command_here\n```\n</format_example>\n\nFailure to follow these rules will cause your response to be rejected.""#;\n\n/// Wrapper around Python tokenizer for exact token counting and truncation.\nstruct PythonTokenizer {\n tokenizer: Py<PyAny>,\n}\n\nimpl PythonTokenizer {\n /// Load a HuggingFace tokenizer.\n fn load(model_name: &str) -> PyResult<Self> {\n Python::with_gil(|py| {\n let transformers = PyModule::import(py, ""transformers"")?;\n let auto_tokenizer = transformers.getattr(""AutoTokenizer"")?;\n let tokenizer = auto_tokenizer.call_method1(""from_pretrained"", (model_name,))?;\n Ok(Self {\n tokenizer: tokenizer.into(),\n })\n })\n }\n}\n\nimpl Tokenizer for PythonTokenizer {\n fn count_tokens(&self, text: &str) -> usize {\n Python::with_gil(|py| {\n let tokenizer = self.tokenizer.as_ref(py);\n let tokens = tokenizer\n .call_method1(""encode"", (text,))\n .expect(""Failed to encode text with tokenizer"");\n tokens.len().unwrap()\n })\n }\n\n fn truncate_to_max_tokens(&self, text: &str, max_tokens: usize) -> String {\n Python::with_gil(|py| {\n let tokenizer = self.tokenizer.as_ref(py);\n let kwargs = pyo3::types::PyDict::new(py);\n kwargs.set_item(""max_length"", max_tokens).unwrap();\n kwargs.set_item(""truncation"", true).unwrap();\n \n let tokens = tokenizer\n .call_method(""encode"", (text,), Some(kwargs))\n .expect(""Failed to encode text with 
tokenizer"");\n \n tokenizer\n .call_method1(""decode"", (tokens,))\n .expect(""Failed to decode tokens"")\n .extract()\n .unwrap()\n })\n }\n}\n\nfn main() -> Result<(), Box<dyn std::error::Error>> {\n let args = Args::parse();\n\n println!(""Loading tokenizer from {}..."", args.tokenizer);\n let tokenizer = PythonTokenizer::load(&args.tokenizer)?;\n\n let config = PipelineConfig {\n max_tokens_per_conversation: args.max_tokens_per_conversation,\n max_tokens_per_message: args.max_tokens_per_message,\n min_conversation_messages: args.min_conversation_messages,\n viewport_radius: args.viewport_radius,\n coalesce_radius: args.coalesce_radius,\n val_ratio: args.val_ratio,\n };\n\n println!(""Processing CSV files from {:?}..."", args.csv_root);\n let session_results = process_all_sessions(\n &args.csv_root,\n &tokenizer,\n &config,\n )?;\n\n let total_sessions = session_results.len();\n println!(""Processed {} sessions"", total_sessions);\n\n let system_prompt = args.system_prompt.as_deref().unwrap_or(DEFAULT_SYSTEM_PROMPT);\n\n println!(""Writing output to {:?}..."", args.output_dir);\n let result: PipelineResult = write_jsonl_output(\n session_results,\n &args.output_dir,\n args.val_ratio,\n system_prompt,\n )?;\n\n let metadata_path = args.output_dir.join(""metadata.json"");\n let metadata = serde_json::json!({\n ""config"": {\n ""csv_root"": args.csv_root.to_string_lossy(),\n ""output_dir"": args.output_dir.to_string_lossy(),\n ""tokenizer"": args.tokenizer,\n ""max_tokens_per_conversation"": args.max_tokens_per_conversation,\n ""max_tokens_per_message"": args.max_tokens_per_message,\n ""min_conversation_messages"": args.min_conversation_messages,\n ""viewport_radius"": args.viewport_radius,\n ""coalesce_radius"": args.coalesce_radius,\n ""val_ratio"": args.val_ratio,\n },\n ""counts"": {\n ""total_sessions"": result.total_sessions,\n ""total_conversations"": result.total_conversations,\n ""train_conversations"": result.train_conversations,\n ""val_conversations"": result.val_conversations,\n },\n ""stats"": {\n ""total_messages"": result.total_messages,\n ""total_tokens"": result.total_tokens,\n ""avg_messages_per_conversation"": if result.total_conversations > 0 {\n result.total_messages as f64 / result.total_conversations as f64\n } else {\n 0.0\n },\n ""avg_tokens_per_conversation"": if result.total_conversations > 0 {\n result.total_tokens as f64 / result.total_conversations as f64\n } else {\n 0.0\n },\n },\n ""files"": {\n ""train_path"": args.output_dir.join(""training.jsonl"").to_string_lossy(),\n ""val_path"": args.output_dir.join(""validation.jsonl"").to_string_lossy(),\n },\n });\n std::fs::write(&metadata_path, serde_json::to_string_pretty(&metadata)?)?;\n\n println!(""\n[summary]"");\n println!("" Total sessions processed: {}"", result.total_sessions);\n println!("" Train conversations: {}"", result.train_conversations);\n println!("" Val conversations: {}"", result.val_conversations);\n println!("" Total messages: {}"", result.total_messages);\n println!("" Total tokens: {}"", result.total_tokens);\n println!("" Output: {:?}/{{training,validation}}.jsonl"", args.output_dir);\n println!("" Metadata: {:?}"", metadata_path);\n\n Ok(())\n}\n\n",rust,tab
|
| 3 |
+
2,463,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:10:10 AM [info] Activating crowd-code\n7:10:10 AM [info] Recording started\n7:10:10 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,637,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"7:10:11 AM [info] Git repository found\n7:10:11 AM [info] Git provider initialized successfully\n7:10:11 AM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,49340,"TERMINAL",0,0,"",,terminal_focus
|
| 6 |
+
5,49342,"crates/cli/src/main.rs",0,0,"",rust,tab
|
| 7 |
+
6,52343,"TERMINAL",0,0,"cd..",,terminal_command
|
| 8 |
+
7,52659,"TERMINAL",0,0,"ls",,terminal_command
|
| 9 |
+
8,52663,"TERMINAL",0,0,"]633;CCargo.lock Cargo.toml [0m[01;34mcrates[0m LICENSE README.md [01;34mtarget[0m\r\n]0;franz.srambical@hai-login2:~/crowd-pilot-serializer",,terminal_output
|
| 10 |
+
9,54165,"TERMINAL",0,0,"cd ..",,terminal_command
|
| 11 |
+
10,54501,"TERMINAL",0,0,"ls",,terminal_command
|
| 12 |
+
11,54502,"TERMINAL",0,0,"]633;C[0m[01;34mcleanrl[0m [01;34mcrowd-pilot[0m [01;34mjafar[0m [01;34mnpm-global[0m [01;34mslurm[0m [01;34mtest_output[0m\r\n[01;34mcrowd-code[0m [01;34mcrowd-pilot-extension[0m [01;34mjax_cache[0m [01;34moai-compatible-copilot[0m [01;34mStoix[0m [01;34mvscode-crowd-pilot-chat[0m\r\n[01;34mcrowd-code-player[0m [01;34mcrowd-pilot-serializer[0m [01;34mmaxtext[0m [01;34msbatch-runner[0m [01;34mtab_model_eval[0m [01;34mzed[0m\r\n]0;franz.srambical@hai-login2:~",,terminal_output
|
| 13 |
+
12,63427,"TERMINAL",0,0,"git clone git@github.com:p-doom/miles.git",,terminal_command
|
| 14 |
+
13,63467,"TERMINAL",0,0,"]633;CCloning into 'miles'...\r\n",,terminal_output
|
| 15 |
+
14,65005,"TERMINAL",0,0,"remote: Enumerating objects: 7374, done.[K\r\nremote: Counting objects: 0% (1/1502)[K\rremote: Counting objects: 1% (16/1502)[K\rremote: Counting objects: 2% (31/1502)[K\rremote: Counting objects: 3% (46/1502)[K\rremote: Counting objects: 4% (61/1502)[K\rremote: Counting objects: 5% (76/1502)[K\rremote: Counting objects: 6% (91/1502)[K\rremote: Counting objects: 7% (106/1502)[K\rremote: Counting objects: 8% (121/1502)[K\rremote: Counting objects: 9% (136/1502)[K\rremote: Counting objects: 10% (151/1502)[K\rremote: Counting objects: 11% (166/1502)[K\rremote: Counting objects: 12% (181/1502)[K\rremote: Counting objects: 13% (196/1502)[K\rremote: Counting objects: 14% (211/1502)[K\rremote: Counting objects: 15% (226/1502)[K\rremote: Counting objects: 16% (241/1502)[K\rremote: Counting objects: 17% (256/1502)[K\rremote: Counting objects: 18% (271/1502)[K\rremote: Counting objects: 19% (286/1502)[K\rremote: Counting objects: 20% (301/1502)[K\rremote: Counting objects: 21% (316/1502)[K\rremote: Counting objects: 22% (331/1502)[K\rremote: Counting objects: 23% (346/1502)[K\rremote: Counting objects: 24% (361/1502)[K\rremote: Counting objects: 25% (376/1502)[K\rremote: Counting objects: 26% (391/1502)[K\rremote: Counting objects: 27% (406/1502)[K\rremote: Counting objects: 28% (421/1502)[K\rremote: Counting objects: 29% (436/1502)[K\rremote: Counting objects: 30% (451/1502)[K\rremote: Counting objects: 31% (466/1502)[K\rremote: Counting objects: 32% (481/1502)[K\r",,terminal_output
|
| 16 |
+
15,65101,"TERMINAL",0,0,"remote: Counting objects: 33% (496/1502)[K\rremote: Counting objects: 34% (511/1502)[K\rremote: Counting objects: 35% (526/1502)[K\rremote: Counting objects: 36% (541/1502)[K\rremote: Counting objects: 37% (556/1502)[K\rremote: Counting objects: 38% (571/1502)[K\rremote: Counting objects: 39% (586/1502)[K\rremote: Counting objects: 40% (601/1502)[K\rremote: Counting objects: 41% (616/1502)[K\rremote: Counting objects: 42% (631/1502)[K\rremote: Counting objects: 43% (646/1502)[K\rremote: Counting objects: 44% (661/1502)[K\rremote: Counting objects: 45% (676/1502)[K\rremote: Counting objects: 46% (691/1502)[K\rremote: Counting objects: 47% (706/1502)[K\rremote: Counting objects: 48% (721/1502)[K\rremote: Counting objects: 49% (736/1502)[K\rremote: Counting objects: 50% (751/1502)[K\rremote: Counting objects: 51% (767/1502)[K\rremote: Counting objects: 52% (782/1502)[K\rremote: Counting objects: 53% (797/1502)[K\rremote: Counting objects: 54% (812/1502)[K\rremote: Counting objects: 55% (827/1502)[K\rremote: Counting objects: 56% (842/1502)[K\rremote: Counting objects: 57% (857/1502)[K\rremote: Counting objects: 58% (872/1502)[K\rremote: Counting objects: 59% (887/1502)[K\rremote: Counting objects: 60% (902/1502)[K\rremote: Counting objects: 61% (917/1502)[K\rremote: Counting objects: 62% (932/1502)[K\rremote: Counting objects: 63% (947/1502)[K\rremote: Counting objects: 64% (962/1502)[K\rremote: Counting objects: 65% (977/1502)[K\rremote: Counting objects: 66% (992/1502)[K\rremote: Counting objects: 67% (1007/1502)[K\rremote: Counting objects: 68% (1022/1502)[K\rremote: Counting objects: 69% (1037/1502)[K\rremote: Counting objects: 70% (1052/1502)[K\rremote: Counting objects: 71% (1067/1502)[K\rremote: Counting objects: 72% (1082/1502)[K\rremote: Counting objects: 73% (1097/1502)[K\rremote: Counting objects: 74% (1112/1502)[K\rremote: Counting objects: 75% (1127/1502)[K\rremote: Counting objects: 76% (1142/1502)[K\rremote: Counting objects: 77% (1157/1502)[K\rremote: Counting objects: 78% (1172/1502)[K\rremote: Counting objects: 79% (1187/1502)[K\rremote: Counting objects: 80% (1202/1502)[K\rremote: Counting objects: 81% (1217/1502)[K\rremote: Counting objects: 82% (1232/1502)[K\rremote: Counting objects: 83% (1247/1502)[K\rremote: Counting objects: 84% (1262/1502)[K\rremote: Counting objects: 85% (1277/1502)[K\rremote: Counting objects: 86% (1292/1502)[K\rremote: Counting objects: 87% (1307/1502)[K\rremote: Counting objects: 88% (1322/1502)[K\rremote: Counting objects: 89% (1337/1502)[K\rremote: Counting objects: 90% (1352/1502)[K\rremote: Counting objects: 91% (1367/1502)[K\rremote: Counting objects: 92% (1382/1502)[K\rremote: Counting objects: 93% (1397/1502)[K\rremote: Counting objects: 94% (1412/1502)[K\rremote: Counting objects: 95% (1427/1502)[K\rremote: Counting objects: 96% (1442/1502)[K\rremote: Counting objects: 97% (1457/1502)[K\rremote: Counting objects: 98% (1472/1502)[K\rremote: Counting objects: 99% (1487/1502)[K\rremote: Counting objects: 100% (1502/1502)[K\rremote: Counting objects: 100% (1502/1502), done.[K\r\nremote: Compressing objects: 0% (1/235)[K\rremote: Compressing objects: 1% (3/235)[K\rremote: Compressing objects: 2% (5/235)[K\rremote: Compressing objects: 3% (8/235)[K\rremote: Compressing objects: 4% (10/235)[K\rremote: Compressing objects: 5% (12/235)[K\rremote: Compressing objects: 6% (15/235)[K\rremote: Compressing objects: 7% (17/235)[K\rremote: Compressing objects: 8% (19/235)[K\rremote: Compressing objects: 9% (22/235)[K\rremote: 
Compressing objects: 10% (24/235)[K\rremote: Compressing objects: 11% (26/235)[K\rremote: Compressing objects: 12% (29/235)[K\rremote: Compressing objects: 13% (31/235)[K\rremote: Compressing objects: 14% (33/235)[K\rremote: Compressing objects: 15% (36/235)[K\rremote: Compressing objects: 16% (38/235)[K\rremote: Compressing objects: 17% (40/235)[K\rremote: Compressing objects: 18% (43/235)[K\rremote: Compressing objects: 19% (45/235)[K\rremote: Compressing objects: 20% (47/235)[K\rremote: Compressing objects: 21% (50/235)[K\rremote: Compressing objects: 22% (52/235)[K\rremote: Compressing objects: 23% (55/235)[K\rremote: Compressing objects: 24% (57/235)[K\rremote: Compressing objects: 25% (59/235)[K\rremote: Compressing objects: 26% (62/235)[K\rremote: Compressing objects: 27% (64/235)[K\rremote: Compressing objects: 28% (66/235)[K\rremote: Compressing objects: 29% (69/235)[K\rremote: Compressing objects: 30% (71/235)[K\rremote: Compressing objects: 31% (73/235)[K\rremote: Compressing objects: 32% (76/235)[K\rremote: Compressing objects: 33% (78/235)[K\rremote: Compressing objects: 34% (80/235)[K\rremote: Compressing objects: 35% (83/235)[K\rremote: Compressing objects: 36% (85/235)[K\rremote: Compressing objects: 37% (87/235)[K\rremote: Compressing objects: 38% (90/235)[K\rremote: Compressing objects: 39% (92/235)[K\rremote: Compressing objects: 40% (94/235)[K\rremote: Compressing objects: 41% (97/235)[K\rremote: Compressing objects: 42% (99/235)[K\rremote: Compressing objects: 43% (102/235)[K\rremote: Compressing objects: 44% (104/235)[K\rremote: Compressing objects: 45% (106/235)[K\rremote: Compressing objects: 46% (109/235)[K\rremote: Compressing objects: 47% (111/235)[K\rremote: Compressing objects: 48% (113/235)[K\rremote: Compressing objects: 49% (116/235)[K\rremote: Compressing objects: 50% (118/235)[K\rremote: Compressing objects: 51% (120/235)[K\rremote: Compressing objects: 52% (123/235)[K\rremote: Compressing objects: 53% (125/235)[K\rremote: Compressing objects: 54% (127/235)[K\rremote: Compressing objects: 55% (130/235)[K\rremote: Compressing objects: 56% (132/235)[K\rremote: Compressing objects: 57% (134/235)[K\rremote: Compressing objects: 58% (137/235)[K\rremote: Compressing objects: 59% (139/235)[K\rremote: Compressing objects: 60% (141/235)[K\rremote: Compressing objects: 61% (144/235)[K\rremote: Compressing objects: 62% (146/235)[K\rremote: Compressing objects: 63% (149/235)[K\rremote: Compressing objects: 64% (151/235)[K\rremote: Compressing objects: 65% (153/235)[K\rremote: Compressing objects: 66% (156/235)[K\rremote: Compressing objects: 67% (158/235)[K\rremote: Compressing objects: 68% (160/235)[K\rremote: Compressing objects: 69% (163/235)[K\rremote: Compressing objects: 70% (165/235)[K\rremote: Compressing objects: 71% (167/235)[K\rremote: Compressing objects: 72% (170/235)[K\rremote: Compressing objects: 73% (172/235)[K\rremote: Compressing objects: 74% (174/235)[K\rremote: Compressing objects: 75% (177/235)[K\rremote: Compressing objects: 76% (179/235)[K\rremote: Compressing objects: 77% (181/235)[K\rremote: Compressing objects: 78% (184/235)[K\rremote: Compressing objects: 79% (186/235)[K\rremote: Compressing objects: 80% (188/235)[K\rremote: Compressing objects: 81% (191/235)[K\rremote: Compressing objects: 82% (193/235)[K\rremote: Compressing objects: 83% (196/235)[K\rremote: Compressing objects: 84% (198/235)[K\rremote: Compressing objects: 85% (200/235)[K\rremote: Compressing objects: 86% (203/235)[K\rremote: Compressing objects: 87% (205/235)[K\rremote: 
Compressing objects: 88% (207/235)[K\rremote: Compressing objects: 89% (210/235)[K\rremote: Compressing objects: 90% (212/235)[K\rremote: Compressing objects: 91% (214/235)[K\rremote: Compressing objects: 92% (217/235)[K\rremote: Compressing objects: 93% (219/235)[K\rremote: Compressing objects: 94% (221/235)[K\rremote: Compressing objects: 95% (224/235)[K\rremote: Compressing objects: 96% (226/235)[K\rremote: Compressing objects: 97% (228/235)[K\rremote: Compressing objects: 98% (231/235)[K\rremote: Compressing objects: 99% (233/235)[K\rremote: Compressing objects: 100% (235/235)[K\rremote: Compressing objects: 100% (235/235), done.[K\r\nReceiving objects: 0% (1/7374)\r",,terminal_output
|
| 17 |
+
16,66152,"TERMINAL",0,0,"Receiving objects: 0% (57/7374), 1.46 MiB | 1.45 MiB/s\r",,terminal_output
|
| 18 |
+
17,66334,"TERMINAL",0,0,"Receiving objects: 1% (74/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 2% (148/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 3% (222/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 4% (295/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 5% (369/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 6% (443/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 7% (517/7374), 1.46 MiB | 1.45 MiB/s\r",,terminal_output
|
| 19 |
+
18,66473,"TERMINAL",0,0,"Receiving objects: 8% (590/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 9% (664/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 10% (738/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 11% (812/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 12% (885/7374), 1.46 MiB | 1.45 MiB/s\r",,terminal_output
|
| 20 |
+
19,66531,"TERMINAL",0,0,"Receiving objects: 13% (959/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 14% (1033/7374), 1.46 MiB | 1.45 MiB/s\rReceiving objects: 15% (1107/7374), 1.46 MiB | 1.45 MiB/s\r",,terminal_output
|
| 21 |
+
20,66999,"TERMINAL",0,0,"Receiving objects: 16% (1180/7374), 2.31 MiB | 1.53 MiB/s\rReceiving objects: 17% (1254/7374), 2.31 MiB | 1.53 MiB/s\r",,terminal_output
|
| 22 |
+
21,67160,"TERMINAL",0,0,"Receiving objects: 17% (1283/7374), 3.36 MiB | 1.67 MiB/s\r",,terminal_output
|
| 23 |
+
22,67862,"TERMINAL",0,0,"Receiving objects: 18% (1328/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 19% (1402/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 20% (1475/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 21% (1549/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 22% (1623/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 23% (1697/7374), 4.17 MiB | 1.66 MiB/s\r",,terminal_output
|
| 24 |
+
23,68170,"TERMINAL",0,0,"Receiving objects: 23% (1749/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 24% (1770/7374), 4.17 MiB | 1.66 MiB/s\rReceiving objects: 25% (1844/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 26% (1918/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 27% (1991/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 28% (2065/7374), 5.33 MiB | 1.77 MiB/s\r",,terminal_output
|
| 25 |
+
24,68481,"TERMINAL",0,0,"Receiving objects: 29% (2139/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 30% (2213/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 31% (2286/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 32% (2360/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 33% (2434/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 34% (2508/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 35% (2581/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 36% (2655/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 37% (2729/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 38% (2803/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 39% (2876/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 40% (2950/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 41% (3024/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 42% (3098/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 43% (3171/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 44% (3245/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 45% (3319/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 46% (3393/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 47% (3466/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 48% (3540/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 49% (3614/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 50% (3687/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 51% (3761/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 52% (3835/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 53% (3909/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 54% (3982/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 55% (4056/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 56% (4130/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 57% (4204/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 58% (4277/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 59% (4351/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 60% (4425/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 61% (4499/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 62% (4572/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 63% (4646/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 64% (4720/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 65% (4794/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 66% (4867/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 67% (4941/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 68% (5015/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 69% (5089/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 70% (5162/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 71% (5236/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 72% (5310/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 73% (5384/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 74% (5457/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 75% (5531/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 76% (5605/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 77% (5678/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 78% (5752/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 79% (5826/7374), 5.33 MiB | 1.77 MiB/s\r",,terminal_output
|
| 26 |
+
25,68578,"TERMINAL",0,0,"Receiving objects: 80% (5900/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 81% (5973/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 82% (6047/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 83% (6121/7374), 5.33 MiB | 1.77 MiB/s\r",,terminal_output
|
| 27 |
+
26,68806,"TERMINAL",0,0,"Receiving objects: 84% (6195/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 85% (6268/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 86% (6342/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 87% (6416/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 88% (6490/7374), 5.33 MiB | 1.77 MiB/s\rReceiving objects: 89% (6563/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 90% (6637/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 91% (6711/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 92% (6785/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 93% (6858/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 94% (6932/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 95% (7006/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 96% (7080/7374), 6.84 MiB | 1.95 MiB/s\rremote: Total 7374 (delta 1351), reused 1271 (delta 1267), pack-reused 5872 (from 1)[K\r\nReceiving objects: 97% (7153/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 98% (7227/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 99% (7301/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 100% (7374/7374), 6.84 MiB | 1.95 MiB/s\rReceiving objects: 100% (7374/7374), 7.25 MiB | 1.99 MiB/s, done.\r\nResolving deltas: 0% (0/5280)\rResolving deltas: 1% (56/5280)\rResolving deltas: 2% (106/5280)\rResolving deltas: 3% (162/5280)\rResolving deltas: 4% (214/5280)\rResolving deltas: 5% (264/5280)\rResolving deltas: 6% (317/5280)\rResolving deltas: 7% (370/5280)\rResolving deltas: 8% (424/5280)\rResolving deltas: 9% (476/5280)\rResolving deltas: 10% (528/5280)\rResolving deltas: 11% (581/5280)\rResolving deltas: 12% (634/5280)\rResolving deltas: 13% (688/5280)\rResolving deltas: 14% (742/5280)\rResolving deltas: 15% (792/5280)\rResolving deltas: 16% (849/5280)\rResolving deltas: 17% (898/5280)\rResolving deltas: 18% (951/5280)\rResolving deltas: 19% (1004/5280)\rResolving deltas: 20% (1056/5280)\rResolving deltas: 21% (1109/5280)\rResolving deltas: 22% (1165/5280)\rResolving deltas: 23% (1216/5280)\rResolving deltas: 24% (1270/5280)\rResolving deltas: 25% (1323/5280)\rResolving deltas: 26% (1374/5280)\rResolving deltas: 27% (1426/5280)\rResolving deltas: 28% (1481/5280)\rResolving deltas: 29% (1533/5280)\rResolving deltas: 30% (1584/5280)\rResolving deltas: 31% (1637/5280)\rResolving deltas: 32% (1690/5280)\rResolving deltas: 33% (1743/5280)\rResolving deltas: 34% (1796/5280)\rResolving deltas: 35% (1848/5280)\rResolving deltas: 36% (1901/5280)\rResolving deltas: 37% (1955/5280)\rResolving deltas: 38% (2007/5280)\rResolving deltas: 39% (2060/5280)\rResolving deltas: 40% (2113/5280)\rResolving deltas: 41% (2167/5280)\rResolving deltas: 42% (2218/5280)\rResolving deltas: 43% (2272/5280)\rResolving deltas: 44% (2327/5280)\rResolving deltas: 45% (2376/5280)\rResolving deltas: 46% (2429/5280)\rResolving deltas: 47% (2482/5280)\rResolving deltas: 48% (2535/5280)\rResolving deltas: 49% (2589/5280)\rResolving deltas: 50% (2640/5280)\rResolving deltas: 51% (2693/5280)\rResolving deltas: 52% (2746/5280)\rResolving deltas: 53% (2799/5280)\rResolving deltas: 54% (2852/5280)\rResolving deltas: 55% (2905/5280)\rResolving deltas: 56% (2957/5280)\rResolving deltas: 57% (3011/5280)\rResolving deltas: 58% (3063/5280)\rResolving deltas: 59% (3116/5280)\rResolving deltas: 60% (3168/5280)\rResolving deltas: 61% (3221/5280)\rResolving deltas: 62% (3274/5280)\rResolving deltas: 63% (3327/5280)\rResolving deltas: 64% (3380/5280)\rResolving deltas: 65% (3432/5280)\rResolving deltas: 66% (3485/5280)\rResolving deltas: 67% (3538/5280)\rResolving deltas: 
68% (3592/5280)\rResolving deltas: 69% (3644/5280)\rResolving deltas: 70% (3696/5280)\rResolving deltas: 71% (3750/5280)\rResolving deltas: 72% (3802/5280)\rResolving deltas: 73% (3856/5280)\rResolving deltas: 74% (3908/5280)\rResolving deltas: 75% (3960/5280)\rResolving deltas: 76% (4013/5280)\rResolving deltas: 77% (4066/5280)\rResolving deltas: 78% (4119/5280)\rResolving deltas: 79% (4172/5280)\rResolving deltas: 80% (4224/5280)\rResolving deltas: 81% (4277/5280)\rResolving deltas: 82% (4330/5280)\rResolving deltas: 83% (4383/5280)\rResolving deltas: 84% (4436/5280)\rResolving deltas: 85% (4489/5280)\rResolving deltas: 86% (4541/5280)\rResolving deltas: 87% (4596/5280)\rResolving deltas: 88% (4650/5280)\rResolving deltas: 89% (4702/5280)\rResolving deltas: 90% (4752/5280)\rResolving deltas: 91% (4805/5280)\rResolving deltas: 92% (4859/5280)\rResolving deltas: 93% (4913/5280)\rResolving deltas: 94% (4964/5280)\rResolving deltas: 95% (5016/5280)\rResolving deltas: 96% (5069/5280)\rResolving deltas: 97% (5122/5280)\rResolving deltas: 98% (5177/5280)\rResolving deltas: 99% (5229/5280)\rResolving deltas: 100% (5280/5280)\rResolving deltas: 100% (5280/5280), done.\r\n",,terminal_output
|
| 28 |
+
27,69877,"TERMINAL",0,0,"Updating files: 96% (344/357)\rUpdating files: 97% (347/357)\rUpdating files: 98% (350/357)\rUpdating files: 99% (354/357)\rUpdating files: 100% (357/357)\rUpdating files: 100% (357/357), done.\r\n]0;franz.srambical@hai-login2:~",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1409d307-b9ff-4e0c-ab0e-0cc111e3d75a1755423127885-2025_08_17-11.32.09.747/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-14f0662f-0032-43e8-be9f-8e53d6f150ad1758635869882-2025_09_23-15.57.52.616/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-151b5a28-ad9d-42b1-abde-16bb640375391764847216251-2025_12_04-12.20.23.736/source.csv
ADDED
|
@@ -0,0 +1,345 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,77,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,394,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:20:23 PM [info] Activating crowd-code\n12:20:23 PM [info] Recording started\n12:20:23 PM [info] Initializing git provider using file system watchers...\n12:20:23 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,1368,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
4,1678,"Untitled-2",0,0,"",plaintext,tab
|
| 6 |
+
5,3062,"Untitled-1",0,0,"",plaintext,tab
|
| 7 |
+
6,7066,"Untitled-1",0,0,"This is the first line.",plaintext,content
|
| 8 |
+
7,8159,"Untitled-1",23,0,"\n",plaintext,content
|
| 9 |
+
8,9802,"Untitled-1",23,1,"",plaintext,content
|
| 10 |
+
9,11722,"Untitled-1",23,0,"\n",plaintext,content
|
| 11 |
+
10,13038,"Untitled-1",24,0,"This is the second line.",plaintext,content
|
| 12 |
+
11,14260,"Untitled-1",24,24,"",plaintext,content
|
| 13 |
+
12,14421,"Untitled-1",23,1,"",plaintext,content
|
| 14 |
+
13,15335,"Untitled-1",0,23,"",plaintext,content
|
| 15 |
+
14,21919,"Untitled-1",0,0,"T",plaintext,content
|
| 16 |
+
15,21922,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 17 |
+
16,22103,"Untitled-1",1,0,"h",plaintext,content
|
| 18 |
+
17,22106,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 19 |
+
18,22112,"Untitled-1",2,0,"i",plaintext,content
|
| 20 |
+
19,22115,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 21 |
+
20,22223,"Untitled-1",3,0,"s",plaintext,content
|
| 22 |
+
21,22226,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 23 |
+
22,22338,"Untitled-1",4,0," ",plaintext,content
|
| 24 |
+
23,22342,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 25 |
+
24,22351,"Untitled-1",5,0,"i",plaintext,content
|
| 26 |
+
25,22353,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 27 |
+
26,22625,"Untitled-1",6,0,"s",plaintext,content
|
| 28 |
+
27,22627,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 29 |
+
28,22642,"Untitled-1",7,0," ",plaintext,content
|
| 30 |
+
29,22645,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 31 |
+
30,27238,"Untitled-1",8,0,"t",plaintext,content
|
| 32 |
+
31,27240,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 33 |
+
32,27294,"Untitled-1",9,0,"h",plaintext,content
|
| 34 |
+
33,27297,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 35 |
+
34,27371,"Untitled-1",10,0,"e",plaintext,content
|
| 36 |
+
35,27374,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 37 |
+
36,27563,"Untitled-1",11,0," ",plaintext,content
|
| 38 |
+
37,27565,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 39 |
+
38,27626,"Untitled-1",12,0,"f",plaintext,content
|
| 40 |
+
39,27628,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 41 |
+
40,27635,"Untitled-1",13,0,"i",plaintext,content
|
| 42 |
+
41,27637,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 43 |
+
42,27710,"Untitled-1",14,0,"r",plaintext,content
|
| 44 |
+
43,27713,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 45 |
+
44,27921,"Untitled-1",15,0,"s",plaintext,content
|
| 46 |
+
45,27924,"Untitled-1",16,0,"",plaintext,selection_keyboard
|
| 47 |
+
46,27930,"Untitled-1",16,0,"t",plaintext,content
|
| 48 |
+
47,27932,"Untitled-1",17,0,"",plaintext,selection_keyboard
|
| 49 |
+
48,28063,"Untitled-1",17,0," ",plaintext,content
|
| 50 |
+
49,28066,"Untitled-1",18,0,"",plaintext,selection_keyboard
|
| 51 |
+
50,31172,"Untitled-1",18,0," ",plaintext,content
|
| 52 |
+
51,31775,"Untitled-1",19,1,"",plaintext,content
|
| 53 |
+
52,33276,"Untitled-1",19,0,"This is the first. Another line here.",plaintext,content
|
| 54 |
+
53,34930,"Untitled-1",0,56,"",plaintext,content
|
| 55 |
+
54,36327,"Untitled-1",0,0,"f",plaintext,content
|
| 56 |
+
55,36479,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 57 |
+
56,36484,"Untitled-1",1,0,"o",plaintext,content
|
| 58 |
+
57,36486,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 59 |
+
58,36490,"Untitled-1",2,0,"r",plaintext,content
|
| 60 |
+
59,36491,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 61 |
+
60,36561,"Untitled-1",3,0," ",plaintext,content
|
| 62 |
+
61,36563,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 63 |
+
62,36577,"Untitled-1",4,0,"i",plaintext,content
|
| 64 |
+
63,36579,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 65 |
+
64,36762,"Untitled-1",5,0," ",plaintext,content
|
| 66 |
+
65,36764,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 67 |
+
66,36798,"Untitled-1",6,0,"i",plaintext,content
|
| 68 |
+
67,36800,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 69 |
+
68,36883,"Untitled-1",7,0,"n",plaintext,content
|
| 70 |
+
69,36885,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 71 |
+
70,37042,"Untitled-1",8,0," ",plaintext,content
|
| 72 |
+
71,37044,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 73 |
+
72,37310,"Untitled-1",9,0,"r",plaintext,content
|
| 74 |
+
73,37312,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 75 |
+
74,37531,"Untitled-1",10,0,"a",plaintext,content
|
| 76 |
+
75,37533,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 77 |
+
76,37571,"Untitled-1",11,0,"n",plaintext,content
|
| 78 |
+
77,37573,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 79 |
+
78,37666,"Untitled-1",12,0,"g",plaintext,content
|
| 80 |
+
79,37668,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 81 |
+
80,37708,"Untitled-1",13,0,"e",plaintext,content
|
| 82 |
+
81,37710,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 83 |
+
82,37890,"Untitled-1",14,0," ",plaintext,content
|
| 84 |
+
83,37892,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 85 |
+
84,38784,"Untitled-1",14,1,"",plaintext,content
|
| 86 |
+
85,39037,"Untitled-1",14,0,"()",plaintext,content
|
| 87 |
+
86,39040,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 88 |
+
87,41901,"Untitled-1",15,0,"0",plaintext,content
|
| 89 |
+
88,41903,"Untitled-1",16,0,"",plaintext,selection_keyboard
|
| 90 |
+
89,42097,"Untitled-1",16,0,",",plaintext,content
|
| 91 |
+
90,42099,"Untitled-1",17,0,"",plaintext,selection_keyboard
|
| 92 |
+
91,42435,"Untitled-1",17,0,"1",plaintext,content
|
| 93 |
+
92,42439,"Untitled-1",18,0,"",plaintext,selection_keyboard
|
| 94 |
+
93,42447,"Untitled-1",18,0,"0",plaintext,content
|
| 95 |
+
94,42449,"Untitled-1",19,0,"",plaintext,selection_keyboard
|
| 96 |
+
95,45316,"Untitled-1",18,0,"",plaintext,selection_command
|
| 97 |
+
96,45876,"Untitled-1",20,0,"",plaintext,selection_command
|
| 98 |
+
97,47251,"Untitled-1",20,0,":",plaintext,content
|
| 99 |
+
98,47254,"Untitled-1",21,0,"",plaintext,selection_keyboard
|
| 100 |
+
99,47522,"Untitled-1",21,0,"\n",plaintext,content
|
| 101 |
+
100,48220,"Untitled-1",21,1,"",plaintext,content
|
| 102 |
+
101,48574,"Untitled-1",20,1,"",plaintext,content
|
| 103 |
+
102,48920,"Untitled-1",20,0,":",plaintext,content
|
| 104 |
+
103,48922,"Untitled-1",21,0,"",plaintext,selection_keyboard
|
| 105 |
+
104,49184,"Untitled-1",21,0,"\n",plaintext,content
|
| 106 |
+
105,49706,"Untitled-1",21,0,"This loop will iterate from 0 to 9 inclusive.",plaintext,content
|
| 107 |
+
106,52631,"Untitled-1",66,1,"",plaintext,content
|
| 108 |
+
107,52821,"Untitled-1",65,1,"",plaintext,content
|
| 109 |
+
108,54871,"Untitled-1",64,0,"",plaintext,selection_command
|
| 110 |
+
109,55180,"Untitled-1",56,0,"",plaintext,selection_command
|
| 111 |
+
110,55717,"Untitled-1",64,0,"",plaintext,selection_command
|
| 112 |
+
111,55972,"Untitled-1",64,1,"e",plaintext,selection_command
|
| 113 |
+
112,56163,"Untitled-1",56,9,"inclusive",plaintext,selection_command
|
| 114 |
+
113,56403,"Untitled-1",54,11,"9 inclusive",plaintext,selection_command
|
| 115 |
+
114,56427,"Untitled-1",51,14,"to 9 inclusive",plaintext,selection_command
|
| 116 |
+
115,56468,"Untitled-1",49,16,"0 to 9 inclusive",plaintext,selection_command
|
| 117 |
+
116,56503,"Untitled-1",44,21,"from 0 to 9 inclusive",plaintext,selection_command
|
| 118 |
+
117,56534,"Untitled-1",36,29,"iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 119 |
+
118,56573,"Untitled-1",31,34,"will iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 120 |
+
119,56598,"Untitled-1",26,39,"loop will iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 121 |
+
120,56632,"Untitled-1",21,44,"This loop will iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 122 |
+
121,56755,"Untitled-1",19,46,"):This loop will iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 123 |
+
122,57082,"Untitled-1",20,45,":This loop will iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 124 |
+
123,57246,"Untitled-1",21,44,"This loop will iterate from 0 to 9 inclusive",plaintext,selection_command
|
| 125 |
+
124,57549,"Untitled-1",21,44,"",plaintext,content
|
| 126 |
+
125,57972,"Untitled-1",21,0,"\n",plaintext,content
|
| 127 |
+
126,60475,"Untitled-1",22,0," ",plaintext,content
|
| 128 |
+
127,65813,"Untitled-1",26,0,"p",plaintext,content
|
| 129 |
+
128,65816,"Untitled-1",27,0,"",plaintext,selection_keyboard
|
| 130 |
+
129,65897,"Untitled-1",27,0,"r",plaintext,content
|
| 131 |
+
130,65899,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 132 |
+
131,66009,"Untitled-1",28,0,"i",plaintext,content
|
| 133 |
+
132,66012,"Untitled-1",29,0,"",plaintext,selection_keyboard
|
| 134 |
+
133,66051,"Untitled-1",29,0,"n",plaintext,content
|
| 135 |
+
134,66054,"Untitled-1",30,0,"",plaintext,selection_keyboard
|
| 136 |
+
135,66252,"Untitled-1",30,0,"t",plaintext,content
|
| 137 |
+
136,66256,"Untitled-1",31,0,"",plaintext,selection_keyboard
|
| 138 |
+
137,67531,"Untitled-1",31,0,"()",plaintext,content
|
| 139 |
+
138,67533,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 140 |
+
139,67695,"Untitled-1",32,1,")",plaintext,content
|
| 141 |
+
140,67697,"Untitled-1",33,0,"",plaintext,selection_keyboard
|
| 142 |
+
141,74789,"Untitled-1",32,0,"",plaintext,selection_command
|
| 143 |
+
142,78811,"Untitled-1",33,0,"",plaintext,selection_command
|
| 144 |
+
143,81730,"Untitled-1",32,0,"",plaintext,selection_command
|
| 145 |
+
144,81814,"Untitled-1",33,0,"",plaintext,selection_command
|
| 146 |
+
145,83341,"Untitled-1",32,1,"",plaintext,content
|
| 147 |
+
146,84355,"Untitled-1",31,0,"",plaintext,selection_command
|
| 148 |
+
147,85087,"Untitled-1",31,0,"print(",plaintext,content
|
| 149 |
+
148,86385,"Untitled-1",36,1,"",plaintext,content
|
| 150 |
+
149,86500,"Untitled-1",35,1,"",plaintext,content
|
| 151 |
+
150,86650,"Untitled-1",34,1,"",plaintext,content
|
| 152 |
+
151,86754,"Untitled-1",33,1,"",plaintext,content
|
| 153 |
+
152,86901,"Untitled-1",32,1,"",plaintext,content
|
| 154 |
+
153,87898,"Untitled-1",31,1,"",plaintext,content
|
| 155 |
+
154,88232,"Untitled-1",32,0,"",plaintext,selection_command
|
| 156 |
+
155,90897,"Untitled-1",31,1,"",plaintext,content
|
| 157 |
+
156,91201,"Untitled-1",31,0,"()",plaintext,content
|
| 158 |
+
157,91204,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 159 |
+
158,93111,"Untitled-1",31,2,"print('Hello')",plaintext,content
|
| 160 |
+
159,94724,"Untitled-1",32,0,"print('Hello World')",plaintext,content
|
| 161 |
+
160,96383,"Untitled-1",51,0,"",plaintext,selection_command
|
| 162 |
+
161,97063,"Untitled-1",22,43," ",plaintext,content
|
| 163 |
+
162,100026,"Untitled-1",26,0,"p",plaintext,content
|
| 164 |
+
163,100029,"Untitled-1",27,0,"",plaintext,selection_keyboard
|
| 165 |
+
164,103065,"Untitled-1",27,0,"r",plaintext,content
|
| 166 |
+
165,103067,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 167 |
+
166,104888,"Untitled-1",27,1,"print i",plaintext,content
|
| 168 |
+
167,106426,"Untitled-1",33,1,"",plaintext,content
|
| 169 |
+
168,106674,"Untitled-1",32,1,"",plaintext,content
|
| 170 |
+
169,106708,"Untitled-1",31,1,"",plaintext,content
|
| 171 |
+
170,106739,"Untitled-1",30,1,"",plaintext,content
|
| 172 |
+
171,106778,"Untitled-1",29,1,"",plaintext,content
|
| 173 |
+
172,107010,"Untitled-1",28,1,"",plaintext,content
|
| 174 |
+
173,107057,"Untitled-1",27,1,"",plaintext,content
|
| 175 |
+
174,107192,"Untitled-1",26,1,"",plaintext,content
|
| 176 |
+
175,107454,"Untitled-1",26,0,"p",plaintext,content
|
| 177 |
+
176,107457,"Untitled-1",27,0,"",plaintext,selection_keyboard
|
| 178 |
+
177,107631,"Untitled-1",27,0,"r",plaintext,content
|
| 179 |
+
178,107634,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 180 |
+
179,109259,"Untitled-1",28,0,"i",plaintext,content
|
| 181 |
+
180,109262,"Untitled-1",29,0,"",plaintext,selection_keyboard
|
| 182 |
+
181,109355,"Untitled-1",29,0,"n",plaintext,content
|
| 183 |
+
182,109357,"Untitled-1",30,0,"",plaintext,selection_keyboard
|
| 184 |
+
183,109433,"Untitled-1",30,0,"t",plaintext,content
|
| 185 |
+
184,109435,"Untitled-1",31,0,"",plaintext,selection_keyboard
|
| 186 |
+
185,115515,"Untitled-1",30,0,"",plaintext,selection_command
|
| 187 |
+
186,116658,"Untitled-1",30,1,"t",plaintext,selection_command
|
| 188 |
+
187,117063,"Untitled-1",8,23," range(0,10):\n print",plaintext,selection_command
|
| 189 |
+
188,117949,"Untitled-1",0,31,"for i in range(0,10):\n print",plaintext,selection_command
|
| 190 |
+
189,118232,"Untitled-1",22,9," print",plaintext,selection_command
|
| 191 |
+
190,118733,"Untitled-1",0,31,"for i in range(0,10):\n print",plaintext,selection_command
|
| 192 |
+
191,118845,"Untitled-1",0,31,"",plaintext,content
|
| 193 |
+
192,119466,"Untitled-1",0,0,"""",plaintext,content
|
| 194 |
+
193,119470,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 195 |
+
194,119601,"Untitled-1",1,0,"""",plaintext,content
|
| 196 |
+
195,119604,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 197 |
+
196,119768,"Untitled-1",2,0,"""",plaintext,content
|
| 198 |
+
197,119771,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 199 |
+
198,121320,"Untitled-1",3,0,"""",plaintext,content
|
| 200 |
+
199,121322,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 201 |
+
200,121475,"Untitled-1",4,0,"""",plaintext,content
|
| 202 |
+
201,121478,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 203 |
+
202,121622,"Untitled-1",5,0,"""",plaintext,content
|
| 204 |
+
203,121625,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 205 |
+
204,121895,"Untitled-1",5,0,"",plaintext,selection_command
|
| 206 |
+
205,121964,"Untitled-1",4,0,"",plaintext,selection_command
|
| 207 |
+
206,122109,"Untitled-1",3,0,"",plaintext,selection_command
|
| 208 |
+
207,124381,"Untitled-1",3,0,"h",plaintext,content
|
| 209 |
+
208,124384,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 210 |
+
209,124468,"Untitled-1",4,0,"e",plaintext,content
|
| 211 |
+
210,124470,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 212 |
+
211,124646,"Untitled-1",5,0,"l",plaintext,content
|
| 213 |
+
212,124649,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 214 |
+
213,124750,"Untitled-1",6,0,"l",plaintext,content
|
| 215 |
+
214,124753,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 216 |
+
215,124875,"Untitled-1",7,0,"o",plaintext,content
|
| 217 |
+
216,124878,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 218 |
+
217,125064,"Untitled-1",8,0," ",plaintext,content
|
| 219 |
+
218,125067,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 220 |
+
219,125073,"Untitled-1",9,0,"w",plaintext,content
|
| 221 |
+
220,125075,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 222 |
+
221,125167,"Untitled-1",10,0,"o",plaintext,content
|
| 223 |
+
222,125170,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 224 |
+
223,125242,"Untitled-1",11,0,"r",plaintext,content
|
| 225 |
+
224,125244,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 226 |
+
225,125462,"Untitled-1",12,0,"l",plaintext,content
|
| 227 |
+
226,125465,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 228 |
+
227,125537,"Untitled-1",13,0,"d",plaintext,content
|
| 229 |
+
228,125540,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 230 |
+
229,125593,"Untitled-1",14,0," ",plaintext,content
|
| 231 |
+
230,125596,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 232 |
+
231,125847,"Untitled-1",15,0,"f",plaintext,content
|
| 233 |
+
232,125850,"Untitled-1",16,0,"",plaintext,selection_keyboard
|
| 234 |
+
233,125856,"Untitled-1",16,0,"u",plaintext,content
|
| 235 |
+
234,125858,"Untitled-1",17,0,"",plaintext,selection_keyboard
|
| 236 |
+
235,126000,"Untitled-1",17,0,"n",plaintext,content
|
| 237 |
+
236,126003,"Untitled-1",18,0,"",plaintext,selection_keyboard
|
| 238 |
+
237,126067,"Untitled-1",18,0,"c",plaintext,content
|
| 239 |
+
238,126070,"Untitled-1",19,0,"",plaintext,selection_keyboard
|
| 240 |
+
239,126282,"Untitled-1",19,0,"t",plaintext,content
|
| 241 |
+
240,126285,"Untitled-1",20,0,"",plaintext,selection_keyboard
|
| 242 |
+
241,126328,"Untitled-1",20,0,"i",plaintext,content
|
| 243 |
+
242,126331,"Untitled-1",21,0,"",plaintext,selection_keyboard
|
| 244 |
+
243,126373,"Untitled-1",21,0,"o",plaintext,content
|
| 245 |
+
244,126376,"Untitled-1",22,0,"",plaintext,selection_keyboard
|
| 246 |
+
245,126438,"Untitled-1",22,0,"n",plaintext,content
|
| 247 |
+
246,126441,"Untitled-1",23,0,"",plaintext,selection_keyboard
|
| 248 |
+
247,129232,"Untitled-1",22,0,"",plaintext,selection_command
|
| 249 |
+
248,130039,"Untitled-1",26,0,"\n",plaintext,content
|
| 250 |
+
249,131102,"Untitled-1",27,0,"d",plaintext,content
|
| 251 |
+
250,131104,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 252 |
+
251,131184,"Untitled-1",28,0,"e",plaintext,content
|
| 253 |
+
252,131186,"Untitled-1",29,0,"",plaintext,selection_keyboard
|
| 254 |
+
253,131404,"Untitled-1",29,0,"f",plaintext,content
|
| 255 |
+
254,131407,"Untitled-1",30,0,"",plaintext,selection_keyboard
|
| 256 |
+
255,131473,"Untitled-1",30,0," ",plaintext,content
|
| 257 |
+
256,131476,"Untitled-1",31,0,"",plaintext,selection_keyboard
|
| 258 |
+
257,132080,"Untitled-1",31,0,"h",plaintext,content
|
| 259 |
+
258,132082,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 260 |
+
259,132134,"Untitled-1",32,0,"e",plaintext,content
|
| 261 |
+
260,132136,"Untitled-1",33,0,"",plaintext,selection_keyboard
|
| 262 |
+
261,132307,"Untitled-1",33,0,"l",plaintext,content
|
| 263 |
+
262,132309,"Untitled-1",34,0,"",plaintext,selection_keyboard
|
| 264 |
+
263,132436,"Untitled-1",34,0,"l",plaintext,content
|
| 265 |
+
264,132440,"Untitled-1",35,0,"",plaintext,selection_keyboard
|
| 266 |
+
265,132551,"Untitled-1",35,0,"o",plaintext,content
|
| 267 |
+
266,132554,"Untitled-1",36,0,"",plaintext,selection_keyboard
|
| 268 |
+
267,133272,"Untitled-1",36,0,"_",plaintext,content
|
| 269 |
+
268,133274,"Untitled-1",37,0,"",plaintext,selection_keyboard
|
| 270 |
+
269,133402,"Untitled-1",37,0,"w",plaintext,content
|
| 271 |
+
270,133404,"Untitled-1",38,0,"",plaintext,selection_keyboard
|
| 272 |
+
271,133468,"Untitled-1",38,0,"o",plaintext,content
|
| 273 |
+
272,133471,"Untitled-1",39,0,"",plaintext,selection_keyboard
|
| 274 |
+
273,133572,"Untitled-1",39,0,"r",plaintext,content
|
| 275 |
+
274,133575,"Untitled-1",40,0,"",plaintext,selection_keyboard
|
| 276 |
+
275,133788,"Untitled-1",40,0,"l",plaintext,content
|
| 277 |
+
276,133790,"Untitled-1",41,0,"",plaintext,selection_keyboard
|
| 278 |
+
277,133828,"Untitled-1",41,0,"d",plaintext,content
|
| 279 |
+
278,133830,"Untitled-1",42,0,"",plaintext,selection_keyboard
|
| 280 |
+
279,138153,"Untitled-1",42,0,"()",plaintext,content
|
| 281 |
+
280,138156,"Untitled-1",43,0,"",plaintext,selection_keyboard
|
| 282 |
+
281,138264,"Untitled-1",43,1,")",plaintext,content
|
| 283 |
+
282,138267,"Untitled-1",44,0,"",plaintext,selection_keyboard
|
| 284 |
+
283,139097,"Untitled-1",44,0,":",plaintext,content
|
| 285 |
+
284,139100,"Untitled-1",45,0,"",plaintext,selection_keyboard
|
| 286 |
+
285,140087,"Untitled-1",45,0,"\n",plaintext,content
|
| 287 |
+
286,143260,"Untitled-1",0,45,"",plaintext,content
|
| 288 |
+
287,144861,"Untitled-1",0,1,"""""""hello world function""""""",plaintext,content
|
| 289 |
+
288,144868,"Untitled-1",22,0,"",plaintext,selection_command
|
| 290 |
+
289,146290,"Untitled-1",26,0,"\n",plaintext,content
|
| 291 |
+
290,147371,"Untitled-1",27,0,"d",plaintext,content
|
| 292 |
+
291,147373,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 293 |
+
292,147461,"Untitled-1",28,0,"e",plaintext,content
|
| 294 |
+
293,147464,"Untitled-1",29,0,"",plaintext,selection_keyboard
|
| 295 |
+
294,147698,"Untitled-1",29,0,"f",plaintext,content
|
| 296 |
+
295,147701,"Untitled-1",30,0,"",plaintext,selection_keyboard
|
| 297 |
+
296,147708,"Untitled-1",30,0," ",plaintext,content
|
| 298 |
+
297,147710,"Untitled-1",31,0,"",plaintext,selection_keyboard
|
| 299 |
+
298,148045,"Untitled-1",31,0,"h",plaintext,content
|
| 300 |
+
299,148048,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 301 |
+
300,148054,"Untitled-1",32,0,"e",plaintext,content
|
| 302 |
+
301,148056,"Untitled-1",33,0,"",plaintext,selection_keyboard
|
| 303 |
+
302,148190,"Untitled-1",33,0,"l",plaintext,content
|
| 304 |
+
303,148194,"Untitled-1",34,0,"",plaintext,selection_keyboard
|
| 305 |
+
304,148319,"Untitled-1",34,0,"l",plaintext,content
|
| 306 |
+
305,148322,"Untitled-1",35,0,"",plaintext,selection_keyboard
|
| 307 |
+
306,148412,"Untitled-1",35,0,"o",plaintext,content
|
| 308 |
+
307,148415,"Untitled-1",36,0,"",plaintext,selection_keyboard
|
| 309 |
+
308,148692,"Untitled-1",36,0,"_",plaintext,content
|
| 310 |
+
309,148694,"Untitled-1",37,0,"",plaintext,selection_keyboard
|
| 311 |
+
310,149041,"Untitled-1",37,0,"w",plaintext,content
|
| 312 |
+
311,149044,"Untitled-1",38,0,"",plaintext,selection_keyboard
|
| 313 |
+
312,149159,"Untitled-1",38,0,"o",plaintext,content
|
| 314 |
+
313,149161,"Untitled-1",39,0,"",plaintext,selection_keyboard
|
| 315 |
+
314,149236,"Untitled-1",39,0,"r",plaintext,content
|
| 316 |
+
315,149239,"Untitled-1",40,0,"",plaintext,selection_keyboard
|
| 317 |
+
316,149407,"Untitled-1",40,0,"l",plaintext,content
|
| 318 |
+
317,149409,"Untitled-1",41,0,"",plaintext,selection_keyboard
|
| 319 |
+
318,149464,"Untitled-1",41,0,"d",plaintext,content
|
| 320 |
+
319,149466,"Untitled-1",42,0,"",plaintext,selection_keyboard
|
| 321 |
+
320,149786,"Untitled-1",42,0,")",plaintext,content
|
| 322 |
+
321,149789,"Untitled-1",43,0,"",plaintext,selection_keyboard
|
| 323 |
+
322,150489,"Untitled-1",42,1,"",plaintext,content
|
| 324 |
+
323,150761,"Untitled-1",42,0,"()",plaintext,content
|
| 325 |
+
324,150763,"Untitled-1",43,0,"",plaintext,selection_keyboard
|
| 326 |
+
325,150836,"Untitled-1",43,1,")",plaintext,content
|
| 327 |
+
326,150838,"Untitled-1",44,0,"",plaintext,selection_keyboard
|
| 328 |
+
327,151289,"Untitled-1",44,0,":",plaintext,content
|
| 329 |
+
328,151292,"Untitled-1",45,0,"",plaintext,selection_keyboard
|
| 330 |
+
329,151623,"Untitled-1",45,0,"\n",plaintext,content
|
| 331 |
+
330,155141,"Untitled-1",45,0,"def hello_world(): This is a comment.",plaintext,content
|
| 332 |
+
331,156700,"Untitled-1",27,0,"",plaintext,selection_command
|
| 333 |
+
332,157533,"Untitled-1",31,0,"",plaintext,selection_command
|
| 334 |
+
333,157689,"Untitled-1",42,0,"",plaintext,selection_command
|
| 335 |
+
334,157849,"Untitled-1",45,0,"",plaintext,selection_command
|
| 336 |
+
335,158653,"Untitled-1",45,37,"",plaintext,content
|
| 337 |
+
336,158661,"Untitled-1",44,0,"",plaintext,selection_command
|
| 338 |
+
337,159535,"Untitled-1",46,0,"",plaintext,selection_command
|
| 339 |
+
338,160209,"Untitled-1",45,0,"This is a comment explaining the function's purpose.\n# Example usage of the function.",plaintext,content
|
| 340 |
+
339,160353,"Untitled-1",131,0," ",plaintext,content
|
| 341 |
+
340,160538,"Untitled-1",135,0," ",plaintext,content
|
| 342 |
+
341,160692,"Untitled-1",139,0," ",plaintext,content
|
| 343 |
+
342,161808,"Untitled-1",98,45,"",plaintext,content
|
| 344 |
+
343,161817,"Untitled-1",45,52,"",plaintext,content
|
| 345 |
+
344,165217,"Untitled-1",46,0," ",plaintext,content
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-17036a72-bca1-4851-94e3-ba9ef28c8a7c1767618340894-2026_01_05-14.05.55.846/source.csv
ADDED
|
@@ -0,0 +1,124 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,920,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:05:55 PM [info] Activating crowd-code\n2:05:55 PM [info] Recording started\n2:05:55 PM [info] Initializing git provider using file system watchers...\n2:05:55 PM [info] No workspace folder found\n",Log,tab
|
| 3 |
+
3,2202,"extension-output-pdoom-org.crowd-code-#1-crowd-code",194,0,"2:05:57 PM [info] Retrying git provider initialization...\n2:05:57 PM [info] No workspace folder found\n",Log,content
|
| 4 |
+
4,30446,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
5,35097,"Untitled-1",0,0,"d",plaintext,content
|
| 6 |
+
6,35100,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 7 |
+
7,35159,"Untitled-1",1,0,"e",plaintext,content
|
| 8 |
+
8,35161,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 9 |
+
9,35352,"Untitled-1",2,0,"f",plaintext,content
|
| 10 |
+
10,35354,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 11 |
+
11,35400,"Untitled-1",3,0," ",plaintext,content
|
| 12 |
+
12,35402,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 13 |
+
13,35441,"Untitled-1",4,0,"h",plaintext,content
|
| 14 |
+
14,35443,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 15 |
+
15,35526,"Untitled-1",5,0,"e",plaintext,content
|
| 16 |
+
16,35529,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 17 |
+
17,35685,"Untitled-1",6,0,"l",plaintext,content
|
| 18 |
+
18,35688,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 19 |
+
19,35837,"Untitled-1",7,0,"l",plaintext,content
|
| 20 |
+
20,35840,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 21 |
+
21,35895,"Untitled-1",8,0,"o",plaintext,content
|
| 22 |
+
22,35898,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 23 |
+
23,37860,"Untitled-1",9,0,"_",plaintext,content
|
| 24 |
+
24,37863,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 25 |
+
25,42530,"Untitled-1",9,0,"",plaintext,selection_command
|
| 26 |
+
26,43237,"Untitled-1",10,0,"",plaintext,selection_command
|
| 27 |
+
27,45059,"Untitled-1",0,10,"def hello_world\n",plaintext,content
|
| 28 |
+
28,49340,"Untitled-1",0,16,"def hello_world\\n",plaintext,content
|
| 29 |
+
29,51234,"Untitled-1",16,1,"",plaintext,content
|
| 30 |
+
30,51723,"Untitled-1",15,1,"",plaintext,content
|
| 31 |
+
31,53297,"Untitled-1",0,15,"def hello_world:\n",plaintext,content
|
| 32 |
+
32,56650,"Untitled-1",0,17,"def hello_world:\n",plaintext,content
|
| 33 |
+
33,57460,"Untitled-1",0,17,"def hello_world:\n",plaintext,content
|
| 34 |
+
34,57946,"Untitled-1",0,17,"def hello_world:\n",plaintext,content
|
| 35 |
+
35,59133,"Untitled-1",17,0," ",plaintext,content
|
| 36 |
+
36,59625,"Untitled-1",21,0,"p",plaintext,content
|
| 37 |
+
37,59627,"Untitled-1",22,0,"",plaintext,selection_keyboard
|
| 38 |
+
38,59710,"Untitled-1",22,0,"r",plaintext,content
|
| 39 |
+
39,59712,"Untitled-1",23,0,"",plaintext,selection_keyboard
|
| 40 |
+
40,59818,"Untitled-1",23,0,"i",plaintext,content
|
| 41 |
+
41,59821,"Untitled-1",24,0,"",plaintext,selection_keyboard
|
| 42 |
+
42,59869,"Untitled-1",24,0,"n",plaintext,content
|
| 43 |
+
43,59871,"Untitled-1",25,0,"",plaintext,selection_keyboard
|
| 44 |
+
44,61251,"Untitled-1",24,1,"",plaintext,content
|
| 45 |
+
45,61409,"Untitled-1",23,1,"",plaintext,content
|
| 46 |
+
46,62149,"Untitled-1",22,1,"",plaintext,content
|
| 47 |
+
47,62231,"Untitled-1",21,1,"",plaintext,content
|
| 48 |
+
48,62459,"Untitled-1",21,0,"p",plaintext,content
|
| 49 |
+
49,62461,"Untitled-1",22,0,"",plaintext,selection_keyboard
|
| 50 |
+
50,62464,"Untitled-1",22,0,"r",plaintext,content
|
| 51 |
+
51,62465,"Untitled-1",23,0,"",plaintext,selection_keyboard
|
| 52 |
+
52,63412,"Untitled-1",22,1,"",plaintext,content
|
| 53 |
+
53,63604,"Untitled-1",21,1,"",plaintext,content
|
| 54 |
+
54,64988,"Untitled-1",17,4,"",plaintext,content
|
| 55 |
+
55,69660,"Untitled-1",0,17,"def hello_world():\n",plaintext,content
|
| 56 |
+
56,70886,"Untitled-1",19,0," print(""Hello, World!"")\n",plaintext,content
|
| 57 |
+
57,72081,"Untitled-1",19,0,"",plaintext,selection_command
|
| 58 |
+
58,76305,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 59 |
+
59,78121,"TERMINAL",0,0,"",,terminal_focus
|
| 60 |
+
60,78124,"Untitled-1",0,0,"",plaintext,tab
|
| 61 |
+
61,85310,"TERMINAL",0,0,"bash",,terminal_focus
|
| 62 |
+
62,86549,"TERMINAL",0,0,"bash",,terminal_focus
|
| 63 |
+
63,87506,"TERMINAL",0,0,"bash",,terminal_focus
|
| 64 |
+
64,93462,"Untitled-1",23,0,"",plaintext,selection_command
|
| 65 |
+
65,93697,"Untitled-1",28,0,"",plaintext,selection_command
|
| 66 |
+
66,93784,"Untitled-1",30,0,"",plaintext,selection_command
|
| 67 |
+
67,93919,"Untitled-1",35,0,"",plaintext,selection_command
|
| 68 |
+
68,97766,"Untitled-1",46,0,"\n",plaintext,content
|
| 69 |
+
69,108288,"Untitled-1",46,0,"",plaintext,selection_command
|
| 70 |
+
70,108450,"Untitled-1",47,0,"",plaintext,selection_command
|
| 71 |
+
71,109498,"Untitled-1",46,1,"",plaintext,content
|
| 72 |
+
72,109655,"Untitled-1",45,1,"",plaintext,content
|
| 73 |
+
73,111295,"Untitled-1",44,0,"",plaintext,selection_command
|
| 74 |
+
74,111958,"Untitled-1",17,0,"",plaintext,selection_command
|
| 75 |
+
75,112927,"Untitled-1",44,0,"",plaintext,selection_command
|
| 76 |
+
76,112931,"Untitled-1",44,1,")",plaintext,selection_command
|
| 77 |
+
77,113450,"Untitled-1",18,27,"\n print(""Hello, World!"")",plaintext,selection_command
|
| 78 |
+
78,113833,"Untitled-1",0,45,"def hello_world():\n print(""Hello, World!"")",plaintext,selection_command
|
| 79 |
+
79,114677,"Untitled-1",17,0,"",plaintext,selection_command
|
| 80 |
+
80,115308,"Untitled-1",0,18,"def hello_world():",plaintext,selection_command
|
| 81 |
+
81,115505,"Untitled-1",0,45,"def hello_world():\n print(""Hello, World!"")",plaintext,selection_command
|
| 82 |
+
82,115702,"Untitled-1",0,45,"",plaintext,content
|
| 83 |
+
83,117595,"Untitled-1",0,0,"d",plaintext,content
|
| 84 |
+
84,117597,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 85 |
+
85,117855,"Untitled-1",1,0,"f",plaintext,content
|
| 86 |
+
86,117857,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 87 |
+
87,117859,"Untitled-1",2,0," ",plaintext,content
|
| 88 |
+
88,117861,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 89 |
+
89,118107,"Untitled-1",2,1,"",plaintext,content
|
| 90 |
+
90,118194,"Untitled-1",1,1,"",plaintext,content
|
| 91 |
+
91,118201,"Untitled-1",1,0,"e",plaintext,content
|
| 92 |
+
92,118203,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 93 |
+
93,118364,"Untitled-1",2,0,"f",plaintext,content
|
| 94 |
+
94,118366,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 95 |
+
95,118368,"Untitled-1",3,0," ",plaintext,content
|
| 96 |
+
96,118370,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 97 |
+
97,118595,"Untitled-1",4,0,"f",plaintext,content
|
| 98 |
+
98,118598,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 99 |
+
99,118601,"Untitled-1",5,0,"i",plaintext,content
|
| 100 |
+
100,118603,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 101 |
+
101,118609,"Untitled-1",6,0,"n",plaintext,content
|
| 102 |
+
102,118610,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 103 |
+
103,118908,"Untitled-1",6,1,"",plaintext,content
|
| 104 |
+
104,119197,"Untitled-1",6,0,"b",plaintext,content
|
| 105 |
+
105,119199,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 106 |
+
106,120699,"Untitled-1",7,0,"o",plaintext,content
|
| 107 |
+
107,120701,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 108 |
+
108,120768,"Untitled-1",8,0,"n",plaintext,content
|
| 109 |
+
109,120771,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 110 |
+
110,120870,"Untitled-1",9,0,"a",plaintext,content
|
| 111 |
+
111,120873,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 112 |
+
112,121236,"Untitled-1",10,0,"c",plaintext,content
|
| 113 |
+
113,121238,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 114 |
+
114,121257,"Untitled-1",11,0,"c",plaintext,content
|
| 115 |
+
115,121259,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 116 |
+
116,121264,"Untitled-1",12,0,"i",plaintext,content
|
| 117 |
+
117,121266,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 118 |
+
118,123597,"Untitled-1",13,0,"()",plaintext,content
|
| 119 |
+
119,123599,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 120 |
+
120,125013,"Untitled-1",0,15,"def fibonacci(n):\n",plaintext,content
|
| 121 |
+
121,127870,"Untitled-1",18,0," if n <= 1:\n",plaintext,content
|
| 122 |
+
122,134583,"Untitled-1",33,0," return n\n",plaintext,content
|
| 123 |
+
123,135749,"Untitled-1",50,0," else:\n",plaintext,content
|
| 124 |
+
124,136733,"Untitled-1",60,0," return fibonacci(n-1) + fibonacci(n-2)\n",plaintext,content
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-172472a9-c7ba-4eb8-8864-d6b51755816f1755676569546-2025_08_20-09.56.18.547/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-18c1122b-fb8e-4e24-ad01-0c4ee70d5bc01763555122672-2025_11_19-13.25.30.350/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-19886bd2-bbe1-43e2-a1cc-a5022d9247ba1763364044525-2025_11_17-08.20.54.212/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1a3a672d-e65d-446e-bbe2-839587893f7a1765464330546-2025_12_11-15.45.42.895/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1bc1f935-6cba-40dc-8614-b9589f348ebe1756235407246-2025_08_26-21.10.10.19/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1c7c7dc7-1c60-41b6-b978-9aa317222b551758733938891-2025_09_24-19.12.33.158/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1e01b6fa-0da6-410c-a989-4fb4b34be9451766512076737-2025_12_23-18.48.05.304/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1e591805-0455-42eb-ab51-85bbcb59890f1764868794984-2025_12_04-18.20.01.983/source.csv
ADDED
|
@@ -0,0 +1,122 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,206,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,547,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"6:20:01 PM [info] Activating crowd-code\n6:20:02 PM [info] Recording started\n6:20:02 PM [info] Initializing git provider using file system watchers...\n6:20:02 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,2307,"extension-output-pdoom-org.crowd-code-#1-crowd-code",194,0,"6:20:04 PM [info] Retrying git provider initialization...\n6:20:04 PM [info] No workspace folder found\n",Log,content
|
| 5 |
+
4,47092,"Untitled-1",0,0,"",plaintext,tab
|
| 6 |
+
5,47196,"Untitled-2",0,0,"",plaintext,tab
|
| 7 |
+
6,48288,"Untitled-1",0,0,"",plaintext,tab
|
| 8 |
+
7,55615,"Untitled-1",0,0,"""",plaintext,content
|
| 9 |
+
8,55618,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 10 |
+
9,55736,"Untitled-1",1,0,"""",plaintext,content
|
| 11 |
+
10,55738,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 12 |
+
11,55861,"Untitled-1",2,0,"""",plaintext,content
|
| 13 |
+
12,55863,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 14 |
+
13,57084,"Untitled-1",3,0,"h",plaintext,content
|
| 15 |
+
14,57086,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 16 |
+
15,57179,"Untitled-1",4,0,"e",plaintext,content
|
| 17 |
+
16,57180,"Untitled-1",5,0,"",plaintext,selection_keyboard
|
| 18 |
+
17,57379,"Untitled-1",5,0,"l",plaintext,content
|
| 19 |
+
18,57380,"Untitled-1",6,0,"",plaintext,selection_keyboard
|
| 20 |
+
19,57507,"Untitled-1",6,0,"l",plaintext,content
|
| 21 |
+
20,57509,"Untitled-1",7,0,"",plaintext,selection_keyboard
|
| 22 |
+
21,57597,"Untitled-1",7,0,"o",plaintext,content
|
| 23 |
+
22,57599,"Untitled-1",8,0,"",plaintext,selection_keyboard
|
| 24 |
+
23,57763,"Untitled-1",8,0," ",plaintext,content
|
| 25 |
+
24,57765,"Untitled-1",9,0,"",plaintext,selection_keyboard
|
| 26 |
+
25,57769,"Untitled-1",9,0,"w",plaintext,content
|
| 27 |
+
26,57771,"Untitled-1",10,0,"",plaintext,selection_keyboard
|
| 28 |
+
27,57836,"Untitled-1",10,0,"o",plaintext,content
|
| 29 |
+
28,57838,"Untitled-1",11,0,"",plaintext,selection_keyboard
|
| 30 |
+
29,57900,"Untitled-1",11,0,"r",plaintext,content
|
| 31 |
+
30,57902,"Untitled-1",12,0,"",plaintext,selection_keyboard
|
| 32 |
+
31,58166,"Untitled-1",12,0,"l",plaintext,content
|
| 33 |
+
32,58168,"Untitled-1",13,0,"",plaintext,selection_keyboard
|
| 34 |
+
33,58201,"Untitled-1",13,0,"d",plaintext,content
|
| 35 |
+
34,58203,"Untitled-1",14,0,"",plaintext,selection_keyboard
|
| 36 |
+
35,58275,"Untitled-1",14,0," ",plaintext,content
|
| 37 |
+
36,58277,"Untitled-1",15,0,"",plaintext,selection_keyboard
|
| 38 |
+
37,58532,"Untitled-1",15,0,"f",plaintext,content
|
| 39 |
+
38,58533,"Untitled-1",16,0,"",plaintext,selection_keyboard
|
| 40 |
+
39,58536,"Untitled-1",16,0,"u",plaintext,content
|
| 41 |
+
40,58537,"Untitled-1",17,0,"",plaintext,selection_keyboard
|
| 42 |
+
41,58606,"Untitled-1",17,0,"n",plaintext,content
|
| 43 |
+
42,58608,"Untitled-1",18,0,"",plaintext,selection_keyboard
|
| 44 |
+
43,58693,"Untitled-1",18,0,"c",plaintext,content
|
| 45 |
+
44,58695,"Untitled-1",19,0,"",plaintext,selection_keyboard
|
| 46 |
+
45,58902,"Untitled-1",19,0,"t",plaintext,content
|
| 47 |
+
46,58904,"Untitled-1",20,0,"",plaintext,selection_keyboard
|
| 48 |
+
47,58905,"Untitled-1",20,0,"i",plaintext,content
|
| 49 |
+
48,58906,"Untitled-1",21,0,"",plaintext,selection_keyboard
|
| 50 |
+
49,58939,"Untitled-1",21,0,"o",plaintext,content
|
| 51 |
+
50,58941,"Untitled-1",22,0,"",plaintext,selection_keyboard
|
| 52 |
+
51,59029,"Untitled-1",22,0,"n",plaintext,content
|
| 53 |
+
52,59030,"Untitled-1",23,0,"",plaintext,selection_keyboard
|
| 54 |
+
53,59247,"Untitled-1",23,0,"""",plaintext,content
|
| 55 |
+
54,59248,"Untitled-1",24,0,"",plaintext,selection_keyboard
|
| 56 |
+
55,59378,"Untitled-1",24,0,"""",plaintext,content
|
| 57 |
+
56,59380,"Untitled-1",25,0,"",plaintext,selection_keyboard
|
| 58 |
+
57,59518,"Untitled-1",25,0,"""",plaintext,content
|
| 59 |
+
58,59520,"Untitled-1",26,0,"",plaintext,selection_keyboard
|
| 60 |
+
59,59804,"Untitled-1",26,0,"\n",plaintext,content
|
| 61 |
+
60,60965,"Untitled-1",27,0,"d",plaintext,content
|
| 62 |
+
61,60967,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 63 |
+
62,61056,"Untitled-1",28,0,"e",plaintext,content
|
| 64 |
+
63,61057,"Untitled-1",29,0,"",plaintext,selection_keyboard
|
| 65 |
+
64,61228,"Untitled-1",29,0,"f",plaintext,content
|
| 66 |
+
65,61229,"Untitled-1",30,0,"",plaintext,selection_keyboard
|
| 67 |
+
66,61313,"Untitled-1",30,0," ",plaintext,content
|
| 68 |
+
67,61314,"Untitled-1",31,0,"",plaintext,selection_keyboard
|
| 69 |
+
68,65686,"Untitled-1",31,0,"e",plaintext,content
|
| 70 |
+
69,65689,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 71 |
+
70,65759,"Untitled-1",32,0,"h",plaintext,content
|
| 72 |
+
71,65761,"Untitled-1",33,0,"",plaintext,selection_keyboard
|
| 73 |
+
72,66047,"Untitled-1",32,1,"",plaintext,content
|
| 74 |
+
73,66174,"Untitled-1",31,1,"",plaintext,content
|
| 75 |
+
74,66260,"Untitled-1",31,0,"h",plaintext,content
|
| 76 |
+
75,66261,"Untitled-1",32,0,"",plaintext,selection_keyboard
|
| 77 |
+
76,66335,"Untitled-1",32,0,"e",plaintext,content
|
| 78 |
+
77,66337,"Untitled-1",33,0,"",plaintext,selection_keyboard
|
| 79 |
+
78,66493,"Untitled-1",33,0,"l",plaintext,content
|
| 80 |
+
79,66495,"Untitled-1",34,0,"",plaintext,selection_keyboard
|
| 81 |
+
80,66622,"Untitled-1",34,0,"l",plaintext,content
|
| 82 |
+
81,66623,"Untitled-1",35,0,"",plaintext,selection_keyboard
|
| 83 |
+
82,67322,"Untitled-1",35,0,"o",plaintext,content
|
| 84 |
+
83,67324,"Untitled-1",36,0,"",plaintext,selection_keyboard
|
| 85 |
+
84,67598,"Untitled-1",36,0,"_",plaintext,content
|
| 86 |
+
85,67599,"Untitled-1",37,0,"",plaintext,selection_keyboard
|
| 87 |
+
86,67816,"Untitled-1",37,0,"w",plaintext,content
|
| 88 |
+
87,67818,"Untitled-1",38,0,"",plaintext,selection_keyboard
|
| 89 |
+
88,67911,"Untitled-1",38,0,"o",plaintext,content
|
| 90 |
+
89,67913,"Untitled-1",39,0,"",plaintext,selection_keyboard
|
| 91 |
+
90,67972,"Untitled-1",39,0,"r",plaintext,content
|
| 92 |
+
91,67974,"Untitled-1",40,0,"",plaintext,selection_keyboard
|
| 93 |
+
92,68182,"Untitled-1",40,0,"l",plaintext,content
|
| 94 |
+
93,68183,"Untitled-1",41,0,"",plaintext,selection_keyboard
|
| 95 |
+
94,68227,"Untitled-1",41,0,"d",plaintext,content
|
| 96 |
+
95,68229,"Untitled-1",42,0,"",plaintext,selection_keyboard
|
| 97 |
+
96,68494,"Untitled-1",42,0,"()",plaintext,content
|
| 98 |
+
97,68496,"Untitled-1",43,0,"",plaintext,selection_keyboard
|
| 99 |
+
98,68600,"Untitled-1",43,1,")",plaintext,content
|
| 100 |
+
99,68602,"Untitled-1",44,0,"",plaintext,selection_keyboard
|
| 101 |
+
100,69218,"Untitled-1",44,0,":",plaintext,content
|
| 102 |
+
101,69220,"Untitled-1",45,0,"",plaintext,selection_keyboard
|
| 103 |
+
102,69450,"Untitled-1",45,0,"\n",plaintext,content
|
| 104 |
+
103,75172,"Untitled-1",46,0," ",plaintext,content
|
| 105 |
+
104,78318,"Untitled-1",50,0,"p",plaintext,content
|
| 106 |
+
105,78320,"Untitled-1",51,0,"",plaintext,selection_keyboard
|
| 107 |
+
106,78508,"Untitled-1",51,0,"r",plaintext,content
|
| 108 |
+
107,78510,"Untitled-1",52,0,"",plaintext,selection_keyboard
|
| 109 |
+
108,78663,"Untitled-1",52,0,"i",plaintext,content
|
| 110 |
+
109,78664,"Untitled-1",53,0,"",plaintext,selection_keyboard
|
| 111 |
+
110,78669,"Untitled-1",53,0,"n",plaintext,content
|
| 112 |
+
111,78670,"Untitled-1",54,0,"",plaintext,selection_keyboard
|
| 113 |
+
112,78692,"Untitled-1",54,0,"t",plaintext,content
|
| 114 |
+
113,78693,"Untitled-1",55,0,"",plaintext,selection_keyboard
|
| 115 |
+
114,127340,"Untitled-1",45,10,"\n print",plaintext,selection_mouse
|
| 116 |
+
115,128003,"Untitled-1",55,0,"",plaintext,selection_mouse
|
| 117 |
+
116,407450,"TERMINAL",0,0,"sed -i '3s/print/print(""hello world"")/' Untitled-1",,terminal_command
|
| 118 |
+
117,408391,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 119 |
+
118,409803,"TERMINAL",0,0,"",,terminal_focus
|
| 120 |
+
119,409807,"Untitled-1",0,0,"",plaintext,tab
|
| 121 |
+
120,413092,"TERMINAL",0,0,"bash",,terminal_focus
|
| 122 |
+
121,414047,"TERMINAL",0,0,"bash",,terminal_focus
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1f4647f6-404b-450c-8af8-65b66087950a1764326087309-2025_11_28-11.34.59.44/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1fa9b85d-3794-4f3b-b7a0-5170b7d2faaa1762362332596-2025_11_05-18.05.39.648/source.csv
ADDED
|
@@ -0,0 +1,19 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",0,0,"/*---------------------------------------------------------------------------------------------\n * Copyright (c) Microsoft Corporation. All rights reserved.\n * Licensed under the MIT License. See License.txt in the project root for license information.\n *--------------------------------------------------------------------------------------------*/\n\n/** @jsxRuntime automatic */\n/** @jsxImportSource ../../../../prompt/jsx-runtime/ */\nimport { CopilotContentExclusionManager, StatusBarEvent } from '../../contentExclusion/contentExclusionManager';\nimport { ICompletionsContextService } from '../../context';\nimport { logger, LogTarget } from '../../logger';\n\nimport { IInstantiationService, ServicesAccessor } from '../../../../../../../util/vs/platform/instantiation/common/instantiation';\nimport { ICompletionsTelemetryService } from '../../../../bridge/src/completionsTelemetryServiceBridge';\nimport { DataPipe, VirtualPrompt } from '../../../../prompt/src/components/virtualPrompt';\nimport { TokenizerName } from '../../../../prompt/src/tokenization';\nimport { CancellationToken, Position } from '../../../../types/src';\nimport { CompletionState } from '../../completionState';\nimport { telemetryException, TelemetryWithExp } from '../../telemetry';\nimport { TextDocumentContents } from '../../textDocument';\nimport { CodeSnippets } from '../components/codeSnippets';\nimport { CompletionsContext } from '../components/completionsContext';\nimport { CompletionsPromptOk, CompletionsPromptRenderer } from '../components/completionsPromptRenderer';\nimport { ContextProviderBridge } from '../components/contextProviderBridge';\nimport { CurrentFile } from '../components/currentFile';\nimport { DocumentMarker } from '../components/marker';\nimport { RecentEdits } from '../components/recentEdits';\nimport { SimilarFiles } from '../components/similarFiles';\nimport { splitContextCompletionsPrompt } from '../components/splitContextPrompt';\nimport { SplitContextPromptRenderer } from '../components/splitContextPromptRenderer';\nimport { Traits } from '../components/traits';\nimport {\n\tContextProviderTelemetry,\n\tmatchContextItems,\n\tResolvedContextItem,\n\ttelemetrizeContextItems,\n\tuseContextProviderAPI,\n} from '../contextProviderRegistry';\nimport { getCodeSnippetsFromContextItems } from '../contextProviders/codeSnippets';\nimport {\n\tCodeSnippetWithId,\n\tSupportedContextItemWithId,\n\tTraitWithId,\n} from '../contextProviders/contextItemSchemas';\nimport { getTraitsFromContextItems, ReportTraitsTelemetry } from '../contextProviders/traits';\nimport { componentStatisticsToPromptMatcher, ContextProviderStatistics } from '../contextProviderStatistics';\nimport {\n\t_contextTooShort,\n\t_copilotContentExclusion,\n\t_promptCancelled,\n\t_promptError,\n\tgetPromptOptions,\n\tMIN_PROMPT_CHARS,\n\tPromptResponse,\n\ttrimLastLine,\n} from '../prompt';\nimport { isIncludeNeighborFilesActive } from '../similarFiles/neighborFiles';\nimport {\n\tCompletionsPromptFactory,\n\tCompletionsPromptOptions,\n\tPromptOpts,\n} from './completionsPromptFactory';\n\nexport type CompletionRequestDocument = TextDocumentContents;\n\nexport type CompletionRequestData = {\n\tdocument: CompletionRequestDocument;\n\tposition: Position;\n\ttelemetryData: TelemetryWithExp;\n\tcancellationToken?: CancellationToken;\n\t// see inlineCompletions data param\n\tdata?: unknown;\n\t// Context 
provider items\n\ttraits?: TraitWithId[];\n\tcodeSnippets?: CodeSnippetWithId[];\n\tturnOffSimilarFiles?: boolean;\n\tsuffixMatchThreshold?: number;\n\tmaxPromptTokens: number;\n\ttokenizer?: TokenizerName;\n};\n\nexport function isCompletionRequestData(data: unknown): data is CompletionRequestData {\n\tif (!data || typeof data !== 'object') { return false; }\n\n\tconst req = data as Partial<CompletionRequestData>;\n\n\t// Check document\n\tif (!req.document) { return false; }\n\n\t// Check position\n\tif (!req.position) { return false; }\n\tif (req.position.line === undefined) { return false; }\n\tif (req.position.character === undefined) { return false; }\n\n\t// Check telemetryData\n\tif (!req.telemetryData) { return false; }\n\n\treturn true;\n}\n\nexport enum PromptOrdering {\n\tDefault = 'default',\n\tSplitContext = 'splitContext',\n}\n\ntype DeclarativePromptFunction = typeof defaultCompletionsPrompt;\ntype AvailableDeclarativePrompts = {\n\t[K in PromptOrdering]: {\n\t\tpromptFunction: DeclarativePromptFunction;\n\t\trenderer: typeof CompletionsPromptRenderer;\n\t};\n};\n\nconst availableDeclarativePrompts: AvailableDeclarativePrompts = {\n\t[PromptOrdering.Default]: {\n\t\tpromptFunction: defaultCompletionsPrompt,\n\t\trenderer: CompletionsPromptRenderer,\n\t},\n\t[PromptOrdering.SplitContext]: {\n\t\tpromptFunction: splitContextCompletionsPrompt,\n\t\trenderer: SplitContextPromptRenderer,\n\t},\n};\n\n// The weights mimic the PromptPriorityList from prompt/src/wishlist.ts\nfunction defaultCompletionsPrompt(accessor: ServicesAccessor) {\n\tconst ctx = accessor.get(ICompletionsContextService);\n\treturn (\n\t\t<>\n\t\t\t<CompletionsContext>\n\t\t\t\t<DocumentMarker ctx={ctx} weight={0.7} />\n\t\t\t\t<Traits weight={0.6} />\n\t\t\t\t<CodeSnippets ctx={ctx} weight={0.9} />\n\t\t\t\t<SimilarFiles ctx={ctx} weight={0.8} />\n\t\t\t\t<RecentEdits ctx={ctx} weight={0.99} />\n\t\t\t</CompletionsContext>\n\t\t\t<CurrentFile weight={1} />\n\t\t</>\n\t);\n}\n\n// Exported for testing\nexport class ComponentsCompletionsPromptFactory implements CompletionsPromptFactory {\n\tprivate virtualPrompt: VirtualPrompt;\n\tprivate pipe: DataPipe;\n\tprivate renderer: CompletionsPromptRenderer;\n\tprivate promptOrdering: PromptOrdering;\n\tprivate logTarget;\n\n\tconstructor(\n\t\tvirtualPrompt: VirtualPrompt | undefined = undefined,\n\t\tordering: PromptOrdering | undefined = undefined,\n\t\t@ICompletionsContextService private readonly ctx: ICompletionsContextService,\n\t\t@IInstantiationService private readonly instantiationService: IInstantiationService,\n\t\t@ICompletionsTelemetryService private readonly completionsTelemetryService: ICompletionsTelemetryService,\n\t) {\n\t\tthis.logTarget = this.ctx.get(LogTarget);\n\t\tthis.promptOrdering = ordering ?? PromptOrdering.Default;\n\t\tthis.virtualPrompt = virtualPrompt ?? 
new VirtualPrompt(this.completionsPrompt());\n\t\tthis.pipe = this.virtualPrompt.createPipe();\n\t\tthis.renderer = this.getRenderer();\n\t}\n\n\tasync prompt(opts: CompletionsPromptOptions, cancellationToken?: CancellationToken): Promise<PromptResponse> {\n\t\ttry {\n\t\t\treturn await this.createPromptUnsafe(opts, cancellationToken);\n\t\t} catch (e) {\n\t\t\treturn this.errorPrompt(e as Error);\n\t\t}\n\t}\n\n\tasync createPromptUnsafe(\n\t\t{ completionId, completionState, telemetryData, promptOpts }: CompletionsPromptOptions,\n\t\tcancellationToken?: CancellationToken\n\t): Promise<PromptResponse> {\n\t\tconst { maxPromptLength, suffixPercent, suffixMatchThreshold } = this.instantiationService.invokeFunction(getPromptOptions,\n\t\t\ttelemetryData,\n\t\t\tcompletionState.textDocument.detectedLanguageId\n\t\t);\n\n\t\tconst failFastPrompt = await this.failFastPrompt(\n\t\t\tcompletionState.textDocument,\n\t\t\tcompletionState.position,\n\t\t\tsuffixPercent,\n\t\t\tcancellationToken\n\t\t);\n\t\tif (failFastPrompt) {\n\t\t\treturn failFastPrompt;\n\t\t}\n\n\t\t// TODO: Prompt ordering changes are triggered by ExP changes.\n\t\t// TODO@benibenj remove this as its always true (except in tests)\n\t\tconst promptOrdering = promptOpts?.separateContext ? PromptOrdering.SplitContext : PromptOrdering.Default;\n\t\tthis.setPromptOrdering(promptOrdering);\n\n\t\tconst start = performance.now();\n\n\t\tconst { traits, codeSnippets, turnOffSimilarFiles, resolvedContextItems } = await this.resolveContext(\n\t\t\tcompletionId,\n\t\t\tcompletionState,\n\t\t\ttelemetryData,\n\t\t\tcancellationToken,\n\t\t\tpromptOpts\n\t\t);\n\n\t\tawait this.updateComponentData(\n\t\t\tcompletionState.textDocument,\n\t\t\tcompletionState.position,\n\t\t\ttraits,\n\t\t\tcodeSnippets,\n\t\t\ttelemetryData,\n\t\t\tturnOffSimilarFiles,\n\t\t\tmaxPromptLength,\n\t\t\tcancellationToken,\n\t\t\tpromptOpts,\n\t\t\tsuffixMatchThreshold,\n\t\t\tpromptOpts?.tokenizer\n\t\t);\n\n\t\tif (cancellationToken?.isCancellationRequested) {\n\t\t\treturn _promptCancelled;\n\t\t}\n\n\t\tconst snapshot = this.virtualPrompt.snapshot(cancellationToken);\n\t\tconst snapshotStatus = snapshot.status;\n\t\tif (snapshotStatus === 'cancelled') {\n\t\t\treturn _promptCancelled;\n\t\t} else if (snapshotStatus === 'error') {\n\t\t\treturn this.errorPrompt(snapshot.error);\n\t\t}\n\n\t\tconst rendered = this.renderer.render(\n\t\t\tsnapshot.snapshot!,\n\t\t\t{\n\t\t\t\tdelimiter: '\n',\n\t\t\t\ttokenizer: promptOpts?.tokenizer,\n\t\t\t\tpromptTokenLimit: maxPromptLength,\n\t\t\t\tsuffixPercent: suffixPercent,\n\t\t\t\tlanguageId: completionState.textDocument.detectedLanguageId,\n\t\t\t},\n\t\t\tcancellationToken\n\t\t);\n\t\tif (rendered.status === 'cancelled') {\n\t\t\treturn _promptCancelled;\n\t\t} else if (rendered.status === 'error') {\n\t\t\treturn this.errorPrompt(rendered.error);\n\t\t}\n\n\t\tconst [prefix, trailingWs] = trimLastLine(rendered.prefix);\n\t\tconst renderedTrimmed = { ...rendered, prefix };\n\n\t\tlet contextProvidersTelemetry: ContextProviderTelemetry[] | undefined = undefined;\n\t\tconst languageId = completionState.textDocument.detectedLanguageId;\n\t\tif (this.instantiationService.invokeFunction(useContextProviderAPI, languageId, telemetryData)) {\n\t\t\tconst promptMatcher = componentStatisticsToPromptMatcher(rendered.metadata.componentStatistics);\n\t\t\tthis.ctx\n\t\t\t\t.get(ContextProviderStatistics)\n\t\t\t\t.getStatisticsForCompletion(completionId)\n\t\t\t\t.computeMatch(promptMatcher);\n\t\t\tcontextProvidersTelemetry 
= telemetrizeContextItems(this.ctx, completionId, resolvedContextItems);\n\t\t\t// To support generating context provider metrics of completion in COffE.\n\t\t\tlogger.debug(this.logTarget, `Context providers telemetry: '${JSON.stringify(contextProvidersTelemetry)}'`);\n\t\t}\n\t\tconst end = performance.now();\n\t\tthis.resetIfEmpty(rendered);\n\t\treturn this.successPrompt(renderedTrimmed, end, start, trailingWs, contextProvidersTelemetry);\n\t}\n\n\tprivate async updateComponentData(\n\t\ttextDocument: CompletionRequestDocument,\n\t\tposition: Position,\n\t\ttraits: TraitWithId[] | undefined,\n\t\tcodeSnippets: CodeSnippetWithId[] | undefined,\n\t\ttelemetryData: TelemetryWithExp,\n\t\tturnOffSimilarFiles: boolean,\n\t\tmaxPromptLength: number,\n\t\tcancellationToken?: CancellationToken,\n\t\topts: PromptOpts = {},\n\t\tsuffixMatchThreshold?: number,\n\t\ttokenizer?: TokenizerName\n\t) {\n\t\tconst completionRequestData = this.createRequestData(\n\t\t\ttextDocument,\n\t\t\tposition,\n\t\t\ttelemetryData,\n\t\t\tcancellationToken,\n\t\t\topts,\n\t\t\tmaxPromptLength,\n\t\t\ttraits,\n\t\t\tcodeSnippets,\n\t\t\tturnOffSimilarFiles,\n\t\t\tsuffixMatchThreshold,\n\t\t\ttokenizer\n\t\t);\n\t\tawait this.pipe.pump(completionRequestData);\n\t}\n\n\tprivate async resolveContext(\n\t\tcompletionId: string,\n\t\tcompletionState: CompletionState,\n\t\ttelemetryData: TelemetryWithExp,\n\t\tcancellationToken?: CancellationToken,\n\t\topts: PromptOpts = {}\n\t): Promise<{\n\t\ttraits: TraitWithId[] | undefined;\n\t\tcodeSnippets: CodeSnippetWithId[] | undefined;\n\t\tturnOffSimilarFiles: boolean;\n\t\tresolvedContextItems: ResolvedContextItem[];\n\t}> {\n\t\tlet resolvedContextItems: ResolvedContextItem[] = [];\n\t\tlet traits: TraitWithId[] | undefined;\n\t\tlet codeSnippets: CodeSnippetWithId[] | undefined;\n\t\tlet turnOffSimilarFiles = false;\n\t\tif (this.instantiationService.invokeFunction(useContextProviderAPI, completionState.textDocument.detectedLanguageId, telemetryData)) {\n\t\t\tresolvedContextItems = await this.ctx.get(ContextProviderBridge).resolution(completionId);\n\t\t\tconst { textDocument } = completionState;\n\t\t\t// Turn off neighboring files if:\n\t\t\t// - it's not explicitly enabled via EXP flag\n\t\t\t// - there are matched context providers\n\t\t\tconst matchedContextItems = resolvedContextItems.filter(matchContextItems);\n\t\t\tif (!this.instantiationService.invokeFunction(similarFilesEnabled, textDocument.detectedLanguageId, matchedContextItems, telemetryData)) {\n\t\t\t\tturnOffSimilarFiles = true;\n\t\t\t}\n\n\t\t\ttraits = await this.instantiationService.invokeFunction(getTraitsFromContextItems, completionId, matchedContextItems);\n\t\t\tvoid this.instantiationService.invokeFunction(ReportTraitsTelemetry,\n\t\t\t\t`contextProvider.traits`,\n\t\t\t\ttraits,\n\t\t\t\ttextDocument.detectedLanguageId,\n\t\t\t\ttextDocument.detectedLanguageId, // TextDocumentContext does not have clientLanguageId\n\t\t\t\ttelemetryData\n\t\t\t);\n\n\t\t\tcodeSnippets = await this.instantiationService.invokeFunction(getCodeSnippetsFromContextItems,\n\t\t\t\tcompletionId,\n\t\t\t\tmatchedContextItems,\n\t\t\t\ttextDocument.detectedLanguageId\n\t\t\t);\n\t\t}\n\t\treturn { traits, codeSnippets, turnOffSimilarFiles, resolvedContextItems };\n\t}\n\n\tprivate async failFastPrompt(\n\t\ttextDocument: TextDocumentContents,\n\t\tposition: Position,\n\t\tsuffixPercent: number,\n\t\tcancellationToken: CancellationToken | undefined\n\t) {\n\t\tif (cancellationToken?.isCancellationRequested) 
{\n\t\t\treturn _promptCancelled;\n\t\t}\n\t\tif (\n\t\t\t(\n\t\t\t\tawait this.ctx\n\t\t\t\t\t.get(CopilotContentExclusionManager)\n\t\t\t\t\t.evaluate(textDocument.uri, textDocument.getText(), StatusBarEvent.UPDATE)\n\t\t\t).isBlocked\n\t\t) {\n\t\t\treturn _copilotContentExclusion;\n\t\t}\n\n\t\tconst eligibleChars = suffixPercent > 0 ? textDocument.getText().length : textDocument.offsetAt(position);\n\t\tif (eligibleChars < MIN_PROMPT_CHARS) {\n\t\t\t// Too short context\n\t\t\treturn _contextTooShort;\n\t\t}\n\t}\n\n\tprivate createRequestData(\n\t\ttextDocument: CompletionRequestDocument,\n\t\tposition: Position,\n\t\ttelemetryData: TelemetryWithExp,\n\t\tcancellationToken: CancellationToken | undefined,\n\t\topts: PromptOpts,\n\t\tmaxPromptLength: number,\n\t\ttraits?: TraitWithId[],\n\t\tcodeSnippets?: CodeSnippetWithId[],\n\t\tturnOffSimilarFiles?: boolean,\n\t\tsuffixMatchThreshold?: number,\n\t\ttokenizer?: TokenizerName\n\t): CompletionRequestData {\n\t\treturn {\n\t\t\tdocument: textDocument,\n\t\t\tposition,\n\t\t\ttelemetryData,\n\t\t\tcancellationToken,\n\t\t\tdata: opts.data,\n\t\t\ttraits,\n\t\t\tcodeSnippets,\n\t\t\tturnOffSimilarFiles,\n\t\t\tsuffixMatchThreshold,\n\t\t\tmaxPromptTokens: maxPromptLength,\n\t\t\ttokenizer,\n\t\t};\n\t}\n\n\tprivate resetIfEmpty(rendered: CompletionsPromptOk) {\n\t\tif (rendered.prefix.length === 0 && rendered.suffix.length === 0) {\n\t\t\tthis.reset();\n\t\t}\n\t}\n\n\tprivate successPrompt(\n\t\trendered: CompletionsPromptOk,\n\t\tend: number,\n\t\tstart: number,\n\t\ttrailingWs: string,\n\t\tcontextProvidersTelemetry?: ContextProviderTelemetry[]\n\t): PromptResponse {\n\t\treturn {\n\t\t\ttype: 'prompt',\n\t\t\tprompt: {\n\t\t\t\tprefix: rendered.prefix,\n\t\t\t\tprefixTokens: rendered.prefixTokens,\n\t\t\t\tsuffix: rendered.suffix,\n\t\t\t\tsuffixTokens: rendered.suffixTokens,\n\t\t\t\tcontext: rendered.context,\n\t\t\t\tisFimEnabled: rendered.suffix.length > 0,\n\t\t\t},\n\t\t\tcomputeTimeMs: end - start,\n\t\t\ttrailingWs,\n\t\t\tneighborSource: new Map(),\n\t\t\tmetadata: rendered.metadata,\n\t\t\tcontextProvidersTelemetry,\n\t\t};\n\t}\n\n\tprivate errorPrompt(error: Error): PromptResponse {\n\t\ttelemetryException(this.completionsTelemetryService, error, 'PromptComponents.CompletionsPromptFactory');\n\t\tthis.reset();\n\t\treturn _promptError;\n\t}\n\n\tprivate reset() {\n\t\tthis.renderer = this.getRenderer();\n\t\tthis.virtualPrompt = new VirtualPrompt(this.completionsPrompt());\n\t\tthis.pipe = this.virtualPrompt.createPipe();\n\t}\n\n\tprivate setPromptOrdering(ordering: PromptOrdering) {\n\t\tif (this.promptOrdering !== ordering) {\n\t\t\tthis.promptOrdering = ordering;\n\t\t\tthis.reset();\n\t\t}\n\t}\n\n\tprivate completionsPrompt() {\n\t\tconst promptFunction =\n\t\t\tavailableDeclarativePrompts[this.promptOrdering]?.promptFunction ?? defaultCompletionsPrompt;\n\t\treturn this.instantiationService.invokeFunction(promptFunction);\n\t}\n\n\tprivate getRenderer() {\n\t\tconst promptInfo =\n\t\t\tavailableDeclarativePrompts[this.promptOrdering] ?? 
availableDeclarativePrompts[PromptOrdering.Default];\n\t\treturn new promptInfo.renderer();\n\t}\n}\n\n// Similar files is enabled if:\n// - the languageId is C/C++.\n// - it's explicitly enabled via EXP flag or config.\n// - no code snippets are provided (which includes the case when all providers error).\nfunction similarFilesEnabled(\n\taccessor: ServicesAccessor,\n\tdetectedLanguageId: string,\n\tmatchedContextItems: ResolvedContextItem<SupportedContextItemWithId>[],\n\ttelemetryData: TelemetryWithExp\n) {\n\tconst cppLanguageIds = ['cpp', 'c'];\n\tconst includeNeighboringFiles =\n\t\tisIncludeNeighborFilesActive(accessor, detectedLanguageId, telemetryData) || cppLanguageIds.includes(detectedLanguageId);\n\treturn (\n\t\tincludeNeighboringFiles || !matchedContextItems.some(ci => ci.data.some(item => item.type === 'CodeSnippet'))\n\t);\n}\n",typescriptreact,tab
|
| 3 |
+
2,289,"tasks",0,0,"",Log,tab
|
| 4 |
+
3,290,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",0,0,"",typescriptreact,tab
|
| 5 |
+
4,343,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"6:05:39 PM [info] Activating crowd-code\n6:05:39 PM [info] Recording started\n6:05:39 PM [info] Initializing git provider using file system watchers...\n6:05:39 PM [info] Git repository found\n6:05:39 PM [info] Git provider initialized successfully\n6:05:39 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 6 |
+
5,12297,"extension-output-pdoom-org.crowd-code-#1-crowd-code",298,0,"",Log,selection_mouse
|
| 7 |
+
6,13219,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",0,0,"",typescriptreact,tab
|
| 8 |
+
7,347453,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",5114,0,"",typescriptreact,selection_command
|
| 9 |
+
8,347571,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",5070,0,"",typescriptreact,selection_command
|
| 10 |
+
9,1004602,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",5026,0,"",typescriptreact,selection_command
|
| 11 |
+
10,1004729,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",4982,0,"",typescriptreact,selection_command
|
| 12 |
+
11,1004853,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",4954,0,"",typescriptreact,selection_command
|
| 13 |
+
12,1005293,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",4908,0,"",typescriptreact,selection_command
|
| 14 |
+
13,2090202,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 15 |
+
14,2092221,"TERMINAL",0,0,"",,terminal_focus
|
| 16 |
+
15,2092222,"src/extension/completions-core/vscode-node/lib/src/prompt/completionsPromptFactory/componentsCompletionsPromptFactory.tsx",0,0,"",typescriptreact,tab
|
| 17 |
+
16,2093995,"TERMINAL",0,0,"cd ..",,terminal_command
|
| 18 |
+
17,2094451,"TERMINAL",0,0,"ls",,terminal_command
|
| 19 |
+
18,2094466,"TERMINAL",0,0,"]633;C[0m[01;34mcleanrl[0m [01;34mcrowd-code-player[0m [01;34mcrowd-pilot[0m [01;34mjafar[0m [01;34mjax_cache[0m [01;34mmaxtext[0m [01;34mnpm-global[0m [01;34moai-compatible-copilot[0m [01;34msbatch-runner[0m [01;34mStoix[0m [01;34mvscode-crowd-pilot-chat[0m\r\n]0;franz.srambical@hai-login2:~",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-204cbf10-93bf-4708-b946-a2b6194d891b1758618195751-2025_09_23-11.03.19.580/source.csv
ADDED
|
@@ -0,0 +1,4 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,2,"genie.py",0,0,"from typing import Dict\n\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsCausal\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n use_gt_actions: bool,\n dyna_type: str,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n self.use_gt_actions = use_gt_actions\n # --- Dynamics ---\n self.dyna_type = dyna_type\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n self.decode = decode\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n if self.use_gt_actions:\n self.action_embed = nnx.Embed(\n self.num_latent_actions, self.latent_action_dim, rngs=rngs\n )\n self.lam = None\n else:\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.action_embed = None\n if self.dyna_type == ""maskgit"":\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n 
param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n elif self.dyna_type == ""causal"":\n self.dynamics = DynamicsCausal(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {self.dyna_type}"")\n\n def __call__(\n self,\n batch: Dict[str, jax.Array],\n training: bool = True,\n ) -> Dict[str, jax.Array]:\n videos_BTHWC = batch[""videos""]\n tokenizer_outputs = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_indices_BTN = tokenizer_outputs[""indices""]\n latent_actions_BTm11L = None\n action_embeddings_BTm11L = None\n if self.use_gt_actions:\n assert self.action_embed is not None\n action_indices_E = None\n action_embeddings_BT1L = self.action_embed(batch[""actions""]).reshape(\n *batch[""actions""].shape[:2], 1, self.latent_action_dim\n )\n action_embeddings_BTm11L = action_embeddings_BT1L[:, 1:]\n else:\n assert self.lam is not None\n lam_outputs = self.lam.vq_encode(videos_BTHWC, training=False)\n z_q_BTm11L = lam_outputs[""z_q""]\n action_indices_E = lam_outputs[""indices""]\n latent_actions_BTm11L = jax.lax.cond(\n self.lam_co_train,\n lambda: z_q_BTm11L,\n lambda: jax.lax.stop_gradient(z_q_BTm11L),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(token_indices_BTN),\n latent_actions=(\n action_embeddings_BTm11L\n if self.use_gt_actions\n else latent_actions_BTm11L\n ),\n )\n outputs[""mask_rng""] = batch[""rng""]\n dyna_logits_BTNV, dyna_mask = self.dynamics(outputs, training)\n outputs[""token_logits""] = dyna_logits_BTNV\n outputs[""mask""] = dyna_mask\n mle_indices_BTN = jnp.argmax(outputs[""token_logits""], axis=-1)\n H, W = batch[""videos""].shape[2:4]\n outputs[""recon""] = self.tokenizer.decode(mle_indices_BTN, (H, W))\n if action_indices_E is not None:\n outputs[""lam_indices""] = action_indices_E\n return outputs\n\n def sample(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n maskgit_steps: int = 25,\n ) -> tuple[jax.Array, jax.Array]:\n if self.dyna_type == ""maskgit"":\n return self.sample_maskgit(\n batch, seq_len, maskgit_steps, temperature, sample_argmax\n )\n elif self.dyna_type == ""causal"":\n return self.sample_causal(batch, seq_len, temperature, sample_argmax)\n else:\n raise ValueError(f""Dynamics model type unknown: {self.dyna_type}"")\n\n def sample_maskgit(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> tuple[jax.Array, jax.Array]:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n 
Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n P: S * N\n """"""\n assert isinstance(self.dynamics, DynamicsMaskGIT)\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n init_logits_BSNV = jnp.zeros(\n shape=(*token_idxs_BSN.shape, self.num_patch_latents)\n )\n if self.use_gt_actions:\n assert self.action_embed is not None\n latent_actions_BT1L = self.action_embed(batch[""actions""]).reshape(\n *batch[""actions""].shape[:2], 1, self.latent_action_dim\n )\n latent_actions_BTm11L = latent_actions_BT1L[:, 1:]\n action_tokens_EL = latent_actions_BTm11L.reshape(-1, self.latent_action_dim)\n else:\n assert self.lam is not None\n latent_actions_E = batch[""latent_actions""]\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n\n # --- Extract submodule state ---\n dynamics_state = nnx.state(self.dynamics)\n\n @nnx.scan(in_axes=(nnx.Carry, 0), out_axes=nnx.Carry)\n def maskgit_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array, jax.Array],\n step: jax.Array,\n ) -> tuple[jax.Array, jax.Array, jax.Array, jax.Array, jax.Array]:\n rng, token_idxs_BSN, logits_BSNV, mask_BSN, action_tokens_EL = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # We need to reconstruct the submodule inside scan body to prevent trace context mismatches\n dynamics_maskgit = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=nnx.Rngs(0),\n )\n nnx.update(dynamics_maskgit, dynamics_state)\n\n # --- Construct + encode video ---\n vid_embed_BSNM = dynamics_maskgit.patch_embed(token_idxs_BSN)\n mask_token_111M = dynamics_maskgit.mask_token.value\n mask_expanded_BSN1 = mask_BSN[..., None]\n vid_embed_BSNM = jnp.where(\n mask_expanded_BSN1, mask_token_111M, vid_embed_BSNM\n )\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = dynamics_maskgit.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(\n act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1])\n )\n vid_embed_BSNM += act_embed_BS1M\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits_BSNV = dynamics_maskgit.transformer(vid_embed_BSNM) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_BSN = jnp.argmax(final_logits_BSNV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_BSN = jax.random.categorical(_rng, final_logits_BSNV)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs_BSN = gather_fn(\n jax.nn.softmax(final_logits_BSNV), sampled_token_idxs_BSN\n )\n final_token_probs_BSN += ~mask_BSN\n # Update masked 
tokens and logits only\n token_idxs_BSN = jnp.where(mask_BSN, sampled_token_idxs_BSN, token_idxs_BSN)\n logits_BSNV = jnp.where(\n jnp.expand_dims(mask_BSN, -1), final_logits_BSNV, logits_BSNV\n )\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n final_token_probs_flat_BP = einops.rearrange(\n final_token_probs_BSN, ""b s n -> b (s n)""\n )\n idx_mask_P = (\n jnp.arange(final_token_probs_flat_BP.shape[-1])\n <= N - num_unmasked_tokens\n )\n sorted_idxs_BP = jnp.argsort(final_token_probs_flat_BP, axis=-1)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask_P))\n mask_flat_BP = einops.rearrange(mask_BSN, ""b s n -> b (s n)"")\n new_mask_flat_BP = mask_update_fn(mask_flat_BP, sorted_idxs_BP)\n new_mask_BSN = einops.rearrange(new_mask_flat_BP, ""b (s n) -> b s n"", n=N)\n\n new_carry = (\n rng,\n token_idxs_BSN,\n logits_BSNV,\n new_mask_BSN,\n action_tokens_EL,\n )\n return new_carry\n\n @nnx.scan(in_axes=(nnx.Carry, 0), out_axes=nnx.Carry)\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[jax.Array, jax.Array, jax.Array]:\n rng, current_token_idxs_BSN, current_logits_BSNV = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current frame (i.e., t == step_t)\n mask_S = jnp.arange(seq_len) == step_t\n mask_BSN = jnp.broadcast_to(mask_S[None, :, None], (B, seq_len, N)).astype(\n bool\n )\n masked_token_idxs_BSN = current_token_idxs_BSN * ~mask_BSN\n masked_logits_BSNV = current_logits_BSNV * jnp.expand_dims(~mask_BSN, -1)\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs_BSN,\n masked_logits_BSNV,\n mask_BSN,\n action_tokens_EL,\n )\n final_carry_maskgit = maskgit_step_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs_BSN = final_carry_maskgit[1]\n updated_logits_BSNV = final_carry_maskgit[2]\n new_carry = (rng, updated_token_idxs_BSN, updated_logits_BSNV)\n return new_carry\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN, init_logits_BSNV)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry = generation_step_fn(initial_carry, timesteps_to_scan)\n final_token_idxs_BSN = final_carry[1]\n final_logits_BSNV = final_carry[2]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC, final_logits_BSNV\n\n def sample_causal(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> tuple[jax.Array, jax.Array]:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S 
- 1)\n """"""\n assert isinstance(self.dynamics, DynamicsCausal)\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n logits_BSNV = jnp.zeros((*token_idxs_BSN.shape, self.num_patch_latents))\n dynamics_state = nnx.state(self.dynamics)\n\n if self.use_gt_actions:\n assert self.action_embed is not None\n latent_actions_BT1L = self.action_embed(batch[""actions""]).reshape(\n *batch[""actions""].shape[:2], 1, self.latent_action_dim\n )\n latent_actions_BTm11L = latent_actions_BT1L[:, 1:]\n action_tokens_EL = latent_actions_BTm11L.reshape(-1, self.latent_action_dim)\n else:\n assert self.lam is not None\n latent_actions_E = batch[""latent_actions""]\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n\n @nnx.scan(in_axes=(nnx.Carry, 0), out_axes=nnx.Carry)\n def causal_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array, jax.Array],\n step_n: jax.Array,\n ) -> tuple[jax.Array, jax.Array, jax.Array, jax.Array, jax.Array]:\n rng, token_idxs_BSN, logits_BSNV, action_tokens_EL, step_t = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # We need to reconstruct the submodule inside scan body to prevent trace context mismatches\n dynamics_causal = DynamicsCausal(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=self.decode,\n rngs=nnx.Rngs(0),\n )\n nnx.update(dynamics_causal, dynamics_state)\n\n # --- Construct + encode video ---\n vid_embed_BSNM = dynamics_causal.patch_embed(token_idxs_BSN)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = dynamics_causal.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(\n act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1])\n )\n vid_embed_BSNp1M = jnp.concatenate([act_embed_BS1M, vid_embed_BSNM], axis=2)\n final_logits_BTNp1V = (\n dynamics_causal.transformer(vid_embed_BSNp1M, (step_t, step_n))\n / temperature\n )\n final_logits_BV = final_logits_BTNp1V[:, step_t, step_n, :]\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_B = jnp.argmax(final_logits_BV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_B = jax.random.categorical(_rng, final_logits_BV)\n # Update next tokens only\n token_idxs_BSN = token_idxs_BSN.at[:, step_t, step_n].set(\n sampled_token_idxs_B\n )\n logits_BSNV = logits_BSNV.at[:, step_t, step_n].set(final_logits_BV)\n\n new_carry = (rng, token_idxs_BSN, logits_BSNV, action_tokens_EL, step_t)\n return new_carry\n\n @nnx.scan(in_axes=(nnx.Carry, 0), out_axes=nnx.Carry)\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[jax.Array, jax.Array, jax.Array]:\n rng, current_token_idxs_BSN, current_logits_BSNV = carry\n rng, step_rng = jax.random.split(rng)\n\n # --- Initialize and run causal loop ---\n init_carry_causal = (\n 
step_rng,\n current_token_idxs_BSN,\n current_logits_BSNV,\n action_tokens_EL,\n step_t,\n )\n final_carry_causal = causal_step_fn(init_carry_causal, jnp.arange(N))\n updated_token_idxs_BSN = final_carry_causal[1]\n updated_logits_BSNV = final_carry_causal[2]\n new_carry = (rng, updated_token_idxs_BSN, updated_logits_BSNV)\n return new_carry\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN, logits_BSNV)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry = generation_step_fn(initial_carry, timesteps_to_scan)\n final_token_idxs_BSN = final_carry[1]\n final_logits_BSNV = final_carry[2]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC, final_logits_BSNV\n\n def vq_encode(self, batch: Dict[str, jax.Array], training: bool) -> jax.Array:\n # --- Preprocess videos ---\n assert self.lam is not None\n video_BTHWC = batch[""videos""]\n lam_output: Dict[str, jax.Array] = self.lam.vq_encode(\n video_BTHWC, training=training\n )\n lam_indices_E = lam_output[""indices""]\n return lam_indices_E\n\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n) -> nnx.Optimizer:\n """"""Restore pre-trained Genie components""""""\n rng_tokenizer, rng_lam = jax.random.split(rng)\n rngs_tokenizer = nnx.Rngs(rng_tokenizer)\n rngs_lam = nnx.Rngs(rng_lam)\n\n tx = optimizer.tx\n model = optimizer.model\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs_tokenizer,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_tokenizer_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n 
in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs_lam,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = _create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_lam_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del model.lam.decoder\n lam_checkpoint_manager.close()\n\n # Reinitialize the optimizer states\n optimizer = nnx.Optimizer(model, tx)\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(\n pytree_template: nnx.GraphState, sharding_spec: jax.sharding.NamedSharding\n) -> jax.Array:\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
|
| 3 |
+
2,115,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 4 |
+
3,246,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:03:19 AM [info] Activating crowd-code\n11:03:19 AM [info] Recording started\n11:03:19 AM [info] Initializing git provider using file system watchers...\n11:03:19 AM [info] Git repository found\n11:03:19 AM [info] Git provider initialized successfully\n11:03:19 AM [info] Initial git state: [object Object]\n",Log,content
|
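The Python recording above ends with Genie-style sampling code that, at each MaskGIT step, keeps only the most confident token predictions and re-masks the rest. A minimal, self-contained sketch of that confidence-based re-masking with a cosine schedule (plain JAX; shapes and names are hypothetical and this is not the recorded implementation):

```python
# Sketch of MaskGIT-style confidence re-masking, assuming a cosine
# unmasking schedule. Illustrative only; not taken from the source.csv above.
import jax
import jax.numpy as jnp

def remask_least_confident(probs_bn, step, total_steps):
    """Return a boolean mask (True = still masked) that keeps only the
    most confident tokens after this sampling step."""
    _, n = probs_bn.shape
    # Fraction of tokens that remain masked after `step` (cosine schedule).
    mask_ratio = jnp.cos(jnp.pi / 2 * (step + 1) / total_steps)
    num_masked = jnp.round(n * mask_ratio).astype(jnp.int32)
    # Rank tokens by confidence; the lowest-confidence ones stay masked.
    order_bn = jnp.argsort(probs_bn, axis=-1)   # ascending confidence
    ranks_bn = jnp.argsort(order_bn, axis=-1)   # rank of each position
    return ranks_bn < num_masked                # True = re-mask this token

key = jax.random.PRNGKey(0)
probs = jax.random.uniform(key, (2, 16))        # fake per-token confidences
print(remask_least_confident(probs, step=0, total_steps=8).sum(axis=-1))
```

At the final step the cosine schedule reaches zero, so no tokens remain masked and the frame is fully sampled.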
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-207fe3e3-7fd2-432d-a410-a7a943195e5f1753557295596-2025_07_26-21.15.03.812/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-22253aff-e784-4c9c-8895-436e45dc7cfd1762854327996-2025_11_11-10.45.34.479/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-23a9afd3-c333-4e29-b2ed-efddc66dd34c1757847239961-2025_09_14-11.54.02.348/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-24737a5e-b7f6-491e-94c6-0c20304cd1e41754227167268-2025_08_03-15.19.34.553/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-29574e2f-1a08-4b03-801c-d0672dd595cd1758207078337-2025_09_18-16.51.34.658/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3078fdf5-e108-4d67-88ef-d05be53da09c1757066213479-2025_09_05-11.57.01.57/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3214e60a-43fc-4481-8dba-af84e93214741765441398363-2025_12_11-09.23.30.463/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-332f83dd-06bd-40a9-81d8-350d79e2bdb41764501843297-2025_11_30-12.24.14.12/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-351cc1d9-c2dd-47fa-95b6-84efafa3a3391765561395177-2025_12_12-18.43.22.49/source.csv
ADDED
|
@@ -0,0 +1,111 @@
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,2,"crowd-pilot-extension/src/extension.ts",0,0,"import * as vscode from 'vscode';\nimport * as http from 'http';\nimport * as fs from 'fs';\nimport * as path from 'path';\nimport { Buffer } from 'buffer';\nimport { ConversationStateManager, estimateTokens } from '@crowd-pilot/serializer';\n\n// -------------------- Preference Data Collection --------------------\n\ninterface PreferenceSample {\n\ttimestamp: number;\n\tcontext: Array<{ role: string; content: string }>;\n\tcompletion: {\n\t\trawModelOutput: string;\n\t\tparsedAction: Action | null;\n\t\tavgLogprob: number;\n\t};\n\toutcome: 'accepted' | 'rejected' | 'ignored' | null;\n\toutcomeTimestamp: number | null;\n\tmodelName: string;\n}\n\ninterface PendingPreferenceSample {\n\tsample: PreferenceSample;\n\tshownAt: number;\n}\n\nlet pendingPreferenceSample: PendingPreferenceSample | null = null;\n\nfunction getPreferenceLogPath(): string {\n\tconst cfg = getConfig();\n\tif (cfg.preferenceLogPath) {\n\t\treturn cfg.preferenceLogPath;\n\t}\n\tconst workspaceFolders = vscode.workspace.workspaceFolders;\n\tif (workspaceFolders) {\n\t\treturn path.join(workspaceFolders[0].uri.fsPath, '.crowd-pilot-preferences.jsonl');\n\t}\n\tthrow new Error(""No preference log path found."");\n}\n\n/**\n * Log a preference sample to the JSONL file.\n * Each line is a complete JSON object for easy streaming/parsing.\n */\nfunction logPreferenceSample(sample: PreferenceSample): void {\n\tconst cfg = getConfig();\n\tif (!cfg.enablePreferenceLogging) {\n\t\tconsole.log(`[crowd-pilot] Preference logging disabled, skipping sample`);\n\t\treturn;\n\t}\n\n\tconst logPath = getPreferenceLogPath();\n\tconst line = JSON.stringify(sample) + '\n';\n\t\n\tfs.appendFile(logPath, line, (err) => {\n\t\tif (err) {\n\t\t\tconsole.error('[crowd-pilot] Failed to log preference sample:', err);\n\t\t} else {\n\t\t\tconsole.log(`[crowd-pilot] Logged preference sample, outcome: (${sample.outcome})`);\n\t\t}\n\t});\n}\n\n/**\n * Create a new pending preference sample when showing a preview.\n * This captures all context needed for reward model training.\n */\nfunction createPendingPreferenceSample(\n\tconversationMessages: Array<{ role: string; content: string }>,\n\trawModelOutput: string,\n\tparsedAction: Action | null,\n\tavgLogprob: number,\n\tmodelName: string\n): void {\n\tconst sample: PreferenceSample = {\n\t\ttimestamp: Date.now(),\n\t\tcontext: conversationMessages,\n\t\tcompletion: {\n\t\t\trawModelOutput,\n\t\t\tparsedAction,\n\t\t\tavgLogprob,\n\t\t},\n\t\toutcome: null,\n\t\toutcomeTimestamp: null,\n\t\tmodelName,\n\t};\n\n\tpendingPreferenceSample = {\n\t\tsample,\n\t\tshownAt: Date.now(),\n\t};\n}\n\n/**\n * Record the outcome of the current pending sample and log it.\n */\nfunction recordPreferenceOutcome(outcome: 'accepted' | 'rejected' | 'ignored'): void {\n\tif (!pendingPreferenceSample) {\n\t\treturn;\n\t}\n\n\tconst sample = pendingPreferenceSample.sample;\n\tsample.outcome = outcome;\n\tsample.outcomeTimestamp = Date.now();\n\n\tlogPreferenceSample(sample);\n\n\tpendingPreferenceSample = null;\n}\n\n/**\n * Mark any pending sample as ignored (user moved on without explicit accept/reject).\n */\nfunction markPendingAsIgnored(): void {\n\tif (pendingPreferenceSample) {\n\t\trecordPreferenceOutcome('ignored');\n\t}\n}\n\ntype Action =\n| { kind: 'showTextDocument' }\n| { kind: 'setSelections', selections: Array<{ start: [number, number], end: [number, number] }> }\n| { kind: 'editInsert', position: [number, number], text: string }\n| { kind: 'editDelete', 
range: { start: [number, number], end: [number, number] } }\n| { kind: 'editReplace', range: { start: [number, number], end: [number, number] }, text: string }\n| { kind: 'terminalShow' }\n| { kind: 'terminalSendText', text: string }\n| { kind: 'openFile', filePath: string, selections?: Array<{ start: [number, number], end: [number, number] }> };\n\n// Configuration helper\nfunction getConfig() {\n\tconst config = vscode.workspace.getConfiguration('crowd-pilot');\n\treturn {\n\t\thostname: config.get<string>('hostname', 'hai001'),\n\t\tport: config.get<number>('port', 30000),\n\t\tbasePath: config.get<string>('basePath', '/v1/chat/completions'),\n\t\tmodelName: config.get<string>('modelName', 'qwen/qwen3-8b'),\n\t\tminAvgLogprob: config.get<number>('minAvgLogprob', -1.0),\n\t\tmaxContextTokens: config.get<number>('maxContextTokens', 120000),\n\t\tpreferenceLogPath: config.get<string>('preferenceLogPath', ''),\n\t\tenablePreferenceLogging: config.get<boolean>('enablePreferenceLogging', true),\n\t};\n}\n\n// -------------------- Context Window Management --------------------\n\n/**\n * Truncate conversation messages to fit within the context window.\n * Assumes system prompt is the first message. Drops oldest conversation messages first.\n */\nfunction truncateToContextLimit(\n\tmessages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>,\n\tmaxTokens: number\n): Array<{ role: 'system' | 'user' | 'assistant'; content: string }> {\n\tif (messages.length === 0) { return messages; }\n\n\tconst systemTokens = estimateTokens(messages[0].content);\n\tconst availableTokens = maxTokens - systemTokens;\n\n\tconst tokenCounts = messages.slice(1).map(m => estimateTokens(m.content));\n\tconst totalConversationTokens = tokenCounts.reduce((a, b) => a + b, 0);\n\n\tif (totalConversationTokens <= availableTokens) {\n\t\treturn messages;\n\t}\n\n\tlet keptTokens = 0;\n\tlet cutoffIndex = tokenCounts.length;\n\tfor (let i = tokenCounts.length - 1; i >= 0; i--) {\n\t\tif (keptTokens + tokenCounts[i] <= availableTokens) {\n\t\t\tkeptTokens += tokenCounts[i];\n\t\t\tcutoffIndex = i;\n\t\t} else {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tconsole.log(`[crowd-pilot] Truncated ${cutoffIndex} oldest messages (${systemTokens + totalConversationTokens} -> ${systemTokens + keptTokens} tokens)`);\n\treturn [messages[0], ...messages.slice(cutoffIndex + 1)];\n}\n\n\n// Global conversation state manager instance\nconst conversationManager = new ConversationStateManager();\n\n// Track activated files (files whose content we've captured)\n// TODO (f.srambical): This logic remains on the extension-side\n// for backwards-compatibility (with the crowd-code dataset).\n// Eventually, we should move the file tracking logic to\n// p-doom/crowd-pilot-serializer.\nconst activatedFiles = new Set<string>();\n\n/**\n * Clear all conversation context - resets the conversation manager and activated files.\n * Call this to start fresh without accumulated history.\n */\nfunction clearContext(): void {\n\tconversationManager.reset();\n\tactivatedFiles.clear();\n\tconsole.log('[crowd-pilot] Context cleared');\n}\n\nlet suggestionsEnabled = true;\nlet statusBarItem: vscode.StatusBarItem | undefined;\n\nfunction updateStatusBarItem(): void {\n\tif (!statusBarItem) { return; }\n\tif (suggestionsEnabled) {\n\t\tstatusBarItem.text = '$(lightbulb) crowd-pilot';\n\t\tstatusBarItem.tooltip = 'crowd-pilot: Tab suggestions enabled (click to disable)';\n\t\tstatusBarItem.backgroundColor = undefined;\n\t} else {\n\t\tstatusBarItem.text = 
'$(lightbulb-autofix) crowd-pilot';\n\t\tstatusBarItem.tooltip = 'crowd-pilot: Tab suggestions disabled (click to enable)';\n\t\tstatusBarItem.backgroundColor = new vscode.ThemeColor('statusBarItem.warningBackground');\n\t}\n}\n\nexport function activate(context: vscode.ExtensionContext) {\n\n\tconsole.log('[crowd-pilot] Extension activated');\n\n\t(async () => {\n\t\tconst config = vscode.workspace.getConfiguration('terminal.integrated');\n\t\tconst commandsToSkipShell = config.get<string[]>('commandsToSkipShell', []);\n\t\tlet updated = false;\n\t\tif (!commandsToSkipShell.includes('crowd-pilot.modelRun')) {\n\t\t\tcommandsToSkipShell.push('crowd-pilot.modelRun');\n\t\t\tupdated = true;\n\t\t}\n\t\tif (!commandsToSkipShell.includes('crowd-pilot.hideUi')) {\n\t\t\tcommandsToSkipShell.push('crowd-pilot.hideUi');\n\t\t\tupdated = true;\n\t\t}\n\t\tif (updated) {\n\t\t\tawait config.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n\t\t}\n\t})().catch((err) => console.error('[crowd-pilot] Startup initialization error:', err));\n\n\tstatusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Right, 100);\n\tstatusBarItem.command = 'crowd-pilot.toggleSuggestions';\n\tupdateStatusBarItem();\n\tstatusBarItem.show();\n\tcontext.subscriptions.push(statusBarItem);\n\n\tconst toggleSuggestions = vscode.commands.registerCommand('crowd-pilot.toggleSuggestions', () => {\n\t\tsuggestionsEnabled = !suggestionsEnabled;\n\t\tupdateStatusBarItem();\n\t\tif (!suggestionsEnabled) {\n\t\t\thidePreviewUI(true);\n\t\t}\n\t\tvscode.window.showInformationMessage(\n\t\t\tsuggestionsEnabled \n\t\t\t\t? '[crowd-pilot]: Tab suggestions enabled' \n\t\t\t\t: '[crowd-pilot]: Tab suggestions disabled'\n\t\t);\n\t});\n\n\tconst hideUi = vscode.commands.registerCommand('crowd-pilot.hideUi', () => {\n\t\trecordPreferenceOutcome('rejected');\n\t\thidePreviewUI(true);\n\t});\n\n\tconst clearContextCmd = vscode.commands.registerCommand('crowd-pilot.clearContext', () => {\n\t\tclearContext();\n\t\tvscode.window.showInformationMessage('[crowd-pilot]: Context cleared');\n\t});\n\n\tconst openPreferenceLogCmd = vscode.commands.registerCommand('crowd-pilot.openPreferenceLog', async () => {\n\t\tconst logPath = getPreferenceLogPath();\n\t\ttry {\n\t\t\tconst uri = vscode.Uri.file(logPath);\n\t\t\tawait vscode.window.showTextDocument(uri);\n\t\t} catch (err: any) {\n\t\t\tif (err.code === 'ENOENT' || err.message?.includes('ENOENT')) {\n\t\t\t\tvscode.window.showInformationMessage('[crowd-pilot] No preference log file exists yet. Accept or reject some suggestions first.');\n\t\t\t} else {\n\t\t\t\tvscode.window.showErrorMessage(`[crowd-pilot] Error opening preference log: ${err.message}`);\n\t\t\t}\n\t\t}\n\t});\n\n\tconst modelRun = vscode.commands.registerCommand('crowd-pilot.modelRun', async () => {\n\t\tconst editor = vscode.window.activeTextEditor;\n\t\tif (!editor) {\n\t\t\treturn;\n\t\t}\n\t\ttry {\n\t\t\tif (!previewVisible) { return; }\n\t\t\tlet action: Action | undefined = currentAction;\n\t\t\tif (!action) {\n\t\t\t\tconst single = await requestModelActions(editor);\n\t\t\t\tcurrentAction = single;\n\t\t\t\taction = single;\n\t\t\t}\n\t\t\tif (!action) {\n\t\t\t\thidePreviewUI();\n\t\t\t\treturn;\n\t\t\t}\n\t\t\trecordPreferenceOutcome('accepted');\n\t\t\thidePreviewUI(false);\n\t\t\tawait executeAction(action);\n\t\t\tautoShowNextAction();\n\t\t} catch (err) {\n\t\t\tconst errorMessage = err instanceof Error ? 
err.message : String(err);\n\t\t\tvscode.window.showErrorMessage(`Model run failed: ${errorMessage}`);\n\t\t}\n\t});\n\n\tconst sglangTest = vscode.commands.registerCommand('crowd-pilot.sglangTest', async () => {\n\t\ttry {\n\t\t\tawait callSGLangChat();\n\t\t} catch (err) {\n\t\t\tconst errorMessage = err instanceof Error ? err.message : String(err);\n\t\t\tvscode.window.showErrorMessage(`SGLang test failed: ${errorMessage}`);\n\t\t}\n\t});\n\n\tconst onSelChange = vscode.window.onDidChangeTextEditorSelection((e) => {\n\t\tif (e.textEditor === vscode.window.activeTextEditor) {\n\t\t\tsuppressAutoPreview = false;\n\t\t\tschedulePredictionRefresh(true, false);\n\n\t\t\tconst editor = e.textEditor;\n\t\t\tconst selection = e.selections[0];\n\t\t\tif (selection) {\n\t\t\t\tconst filePath = editor.document.uri.fsPath;\n\t\t\t\tconst offset = editor.document.offsetAt(selection.start);\n\t\t\t\tconversationManager.handleSelectionEvent(filePath, offset);\n\t\t\t}\n\t\t}\n\t});\n\n\tconst onActiveChange = vscode.window.onDidChangeActiveTextEditor((editor) => {\n\t\tsuppressAutoPreview = false;\n\t\tschedulePredictionRefresh(true, false);\n\n\t\tif (editor) {\n\t\t\tconst filePath = editor.document.uri.fsPath;\n\t\t\tconst currentFileUri = editor.document.uri.toString();\n\t\t\tlet tabEventText: string | null = null;\n\n\t\t\tif (!activatedFiles.has(currentFileUri)) {\n\t\t\t\ttabEventText = editor.document.getText();\n\t\t\t\tactivatedFiles.add(currentFileUri);\n\t\t\t}\n\n\t\t\tconversationManager.handleTabEvent(filePath, tabEventText);\n\t\t}\n\t});\n\n\tconst onDocChange = vscode.workspace.onDidChangeTextDocument((e) => {\n\t\tif (vscode.window.activeTextEditor?.document === e.document) {\n\t\t\tsuppressAutoPreview = false;\n\t\t\tschedulePredictionRefresh(true, false);\n\n\t\t\tconst filePath = e.document.uri.fsPath;\n\t\t\tfor (const change of e.contentChanges) {\n\t\t\t\tconst offset = change.rangeOffset;\n\t\t\t\tconst length = change.rangeLength;\n\t\t\t\tconst newText = change.text;\n\t\t\t\tconversationManager.handleContentEvent(filePath, offset, length, newText);\n\t\t\t}\n\t\t}\n\t});\n\n\t// Terminal focus event\n\tconst onTerminalChange = vscode.window.onDidChangeActiveTerminal((terminal) => {\n\t\tif (terminal) {\n\t\t\tconversationManager.handleTerminalFocusEvent();\n\t\t}\n\t});\n\n\t// Terminal command execution event\n\tconst onTerminalCommand = vscode.window.onDidStartTerminalShellExecution(async (event) => {\n\t\tconst commandLine = event.execution.commandLine.value;\n\t\tconversationManager.handleTerminalCommandEvent(commandLine);\n\n\t\t// Capture terminal output\n\t\tconst stream = event.execution.read();\n\t\tfor await (const data of stream) {\n\t\t\tconversationManager.handleTerminalOutputEvent(data);\n\t\t}\n\t});\n\n\tcontext.subscriptions.push(\n\t\ttoggleSuggestions,\n\t\thideUi,\n\t\tclearContextCmd,\n\t\topenPreferenceLogCmd,\n\t\tsglangTest,\n\t\tmodelRun,\n\t\tonSelChange,\n\t\tonActiveChange,\n\t\tonDocChange,\n\t\tonTerminalChange,\n\t\tonTerminalCommand\n\t);\n\n\t// Initialize: capture current active editor if any\n\tconst initialEditor = vscode.window.activeTextEditor;\n\tif (initialEditor) {\n\t\tconst filePath = initialEditor.document.uri.fsPath;\n\t\tconst currentFileUri = initialEditor.document.uri.toString();\n\t\tconst tabEventText = initialEditor.document.getText();\n\t\tactivatedFiles.add(currentFileUri);\n\t\tconversationManager.handleTabEvent(filePath, tabEventText);\n\t}\n}\n\nexport function deactivate() {}\n\n// -------------------- Execution 
--------------------\nlet currentAction: Action | undefined;\n\nfunction getActiveOrCreateTerminal(): vscode.Terminal {\n\tif (vscode.window.activeTerminal) {\n\t\treturn vscode.window.activeTerminal;\n\t}\n\treturn vscode.window.createTerminal('crowd-pilot');\n}\n\nasync function executeAction(action: Action): Promise<void> {\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\tconst doc = editor.document;\n\tif (action.kind === 'showTextDocument') {\n\t\tawait vscode.window.showTextDocument(doc);\n\t\treturn;\n\t}\n\tif (action.kind === 'setSelections') {\n\t\teditor.selections = action.selections.map(s => new vscode.Selection(\n\t\t\tnew vscode.Position(s.start[0], s.start[1]),\n\t\t\tnew vscode.Position(s.end[0], s.end[1])\n\t\t));\n\t\teditor.revealRange(editor.selections[0], vscode.TextEditorRevealType.InCenterIfOutsideViewport);\n\t\treturn;\n\t}\n\tif (action.kind === 'editInsert') {\n\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.insert(new vscode.Position(action.position[0], action.position[1]), action.text));\n\t\treturn;\n\t}\n\tif (action.kind === 'editDelete') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(action.range.start[0], action.range.start[1]),\n\t\t\tnew vscode.Position(action.range.end[0], action.range.end[1])\n\t\t);\n\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.delete(range));\n\t\treturn;\n\t}\n\tif (action.kind === 'editReplace') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(action.range.start[0], action.range.start[1]),\n\t\t\tnew vscode.Position(action.range.end[0], action.range.end[1])\n\t\t);\n\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.replace(range, action.text));\n\t\treturn;\n\t}\n\tif (action.kind === 'terminalShow') {\n\t\tconst term = getActiveOrCreateTerminal();\n\t\tterm.show();\n\t\treturn;\n\t}\n\tif (action.kind === 'terminalSendText') {\n\t\tconst term = getActiveOrCreateTerminal();\n\t\tterm.show();\n\t\tterm.sendText(action.text, false);\n\t\treturn;\n\t}\n\tif (action.kind === 'openFile') {\n\t\tconst uri = vscode.Uri.file(action.filePath);\n\t\tconst openedEditor = await vscode.window.showTextDocument(uri);\n\t\tif (action.selections) {\n\t\t\topenedEditor.selections = action.selections.map(s => new vscode.Selection(\n\t\t\t\tnew vscode.Position(s.start[0], s.start[1]),\n\t\t\t\tnew vscode.Position(s.end[0], s.end[1])\n\t\t\t));\n\t\t\topenedEditor.revealRange(openedEditor.selections[0], vscode.TextEditorRevealType.InCenterIfOutsideViewport);\n\t\t}\n\t\treturn;\n\t}\n}\n\n// -------------------- UI State & Helpers --------------------\nconst UI_CONTEXT_KEY = 'crowdPilot.uiVisible';\nlet previewVisible = false;\nlet decorationDeleteType: vscode.TextEditorDecorationType | undefined;\nlet decorationReplaceType: vscode.TextEditorDecorationType | undefined;\nlet decorationReplaceBlockType: vscode.TextEditorDecorationType | undefined;\nlet mockStep = 0;\nlet suppressAutoPreview = false;\nlet latestRequestId = 0;\nlet currentAbortController: AbortController | undefined;\n\nconst PREDICTION_DEBOUNCE_MS = 150;\nconst PREDICTION_THROTTLE_MS = 300;\n\ntype PendingPrediction = { id: number; timer: NodeJS.Timeout };\n\nlet nextQueuedPredictionId = 0;\nlet pendingPredictions: PendingPrediction[] = [];\nconst cancelledPredictionIds = new Set<number>();\nlet lastPredictionTimestamp: number | undefined;\n\nfunction disposePreviewDecorations() {\n\ttry { decorationDeleteType?.dispose(); } catch {}\n\ttry { decorationReplaceType?.dispose(); } catch {}\n\ttry { 
decorationReplaceBlockType?.dispose(); } catch {}\n\tdecorationDeleteType = undefined;\n\tdecorationReplaceType = undefined;\n\tdecorationReplaceBlockType = undefined;\n}\n\nfunction getDynamicMargin(editor: vscode.TextEditor, anchorLine: number, text: string): string {\n\tconst lines = text.split(/\r?\n/);\n\tconst height = lines.length;\n\t\n\t// We need to check the document lines that will be covered by this panel.\n\t// The panel starts at 'anchorLine' and extends downwards by 'height' lines.\n\t// However, visually, since it's 'after', it sits to the right of 'anchorLine',\n\t// and then flows down.\n\t// So we check document lines from anchorLine to anchorLine + height - 1.\n\t\n\tconst doc = editor.document;\n\tlet maxLen = 0;\n\tconst startLine = anchorLine;\n\tconst endLine = Math.min(doc.lineCount - 1, anchorLine + height - 1);\n\t\n\tfor (let i = startLine; i <= endLine; i++) {\n\t\tconst lineText = doc.lineAt(i).text;\n\t\tconst len = lineText.replace(/\t/g, ' ').length;\n\t\tif (len > maxLen) {\n\t\t\tmaxLen = len;\n\t\t}\n\t}\n\t\n\tconst anchorLineText = doc.lineAt(anchorLine).text;\n\tconst anchorLen = anchorLineText.replace(/\t/g, ' ').length;\n\t\n\tconst diff = Math.max(0, maxLen - anchorLen);\n\tconst margin = diff + 4; \n\treturn `${margin}ch`;\n}\n\nfunction showPreviewUI(action: Action): void {\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\tdisposePreviewDecorations();\n\n\tconst next = (action.kind === 'editInsert' || action.kind === 'editDelete' || action.kind === 'editReplace' || action.kind === 'terminalSendText' || action.kind === 'setSelections' || action.kind === 'openFile') ? action : undefined;\n\tif (!next) {\n\t\tpreviewVisible = false;\n\t\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, false);\n\t\tcurrentAction = action;\n\t\treturn;\n\t}\n\n\tconst trimText = (t: string) => {\n\t\tconst oneLine = t.replace(/\r?\n/g, '\\n');\n\t\treturn oneLine.length > 80 ? 
oneLine.slice(0, 77) + '…' : oneLine;\n\t};\n\n\tif (next.kind === 'setSelections') {\n\t\tconst selection = next.selections[0];\n\t\tconst targetPos = new vscode.Position(selection.start[0], selection.start[1]);\n\t\tconst isVisible = editor.visibleRanges.some(r => r.contains(targetPos));\n\t\t\n\t\tlet anchorPos = targetPos;\n\t\tlet label = ""↳ Move Cursor Here"";\n\n\t\tif (!isVisible && editor.visibleRanges.length > 0) {\n\t\t\tconst firstVisible = editor.visibleRanges[0].start;\n\t\t\tconst lastVisible = editor.visibleRanges[editor.visibleRanges.length - 1].end;\n\t\t\t\n\t\t\tif (targetPos.isBefore(firstVisible)) {\n\t\t\t\tanchorPos = new vscode.Position(firstVisible.line, Number.MAX_VALUE);\n\t\t\t} else {\n\t\t\t\tanchorPos = new vscode.Position(lastVisible.line, Number.MAX_VALUE);\n\t\t\t}\n\n\t\t\tif (targetPos.line < anchorPos.line) {\n\t\t\t\tlabel = `↑ Move Cursor to Line ${targetPos.line + 1}`;\n\t\t\t} else {\n\t\t\t\tlabel = `↓ Move Cursor to Line ${targetPos.line + 1}`;\n\t\t\t}\n\t\t}\n\n\t\tconst margin = getDynamicMargin(editor, anchorPos.line, label);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${label}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if (next.kind === 'terminalSendText') {\n\t\tconst cursor = editor.selection.active;\n\t\tconst isVisible = editor.visibleRanges.some(r => r.contains(cursor));\n\t\t\n\t\tlet anchorPos = new vscode.Position(cursor.line, Number.MAX_VALUE);\n\t\t\n\t\tif (!isVisible && editor.visibleRanges.length > 0) {\n\t\t\tconst firstVisible = editor.visibleRanges[0].start;\n\t\t\tconst lastVisible = editor.visibleRanges[editor.visibleRanges.length - 1].end;\n\t\t\t\n\t\t\tif (cursor.isBefore(firstVisible)) {\n\t\t\t\tanchorPos = new vscode.Position(firstVisible.line, Number.MAX_VALUE);\n\t\t\t} else {\n\t\t\t\tanchorPos = new vscode.Position(lastVisible.line, Number.MAX_VALUE);\n\t\t\t}\n\t\t}\n\t\t\n\t\tconst summary = trimText(next.text || '');\n\t\tconst label = `↳ Execute shell command in terminal: ${summary}`;\n\t\tconst margin = getDynamicMargin(editor, anchorPos.line, label);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${label.replace(/""/g, '\\""')}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if 
(next.kind === 'editInsert') {\n\t\tconst posLine = next.position[0];\n\t\tconst fullBlock = next.text;\n\t\tconst cssContent = fullBlock\n\t\t\t.replace(/""/g, '\\""')\n\t\t\t.replace(/\r?\n/g, '\\A ');\n\n\t\tconst docLineCount = editor.document.lineCount;\n\t\tlet anchorLine = posLine;\n\t\tlet shiftUp = true;\n\t\t\n\t\tif (anchorLine >= docLineCount) {\n\t\t\tanchorLine = docLineCount - 1;\n\t\t\tshiftUp = false;\n\t\t}\n\n\t\tconst anchorPos = new vscode.Position(anchorLine, Number.MAX_VALUE); \n\t\t\n\t\tconst marginCheckLine = anchorLine;\n\t\tconst margin = getDynamicMargin(editor, marginCheckLine, fullBlock);\n\n\t\tconst topOffset = '0';\n\n\t\tconst beforeDecoration = {\n\t\t\tcontentText: '',\n\t\t\ttextDecoration: `none; position: absolute; left: 0; width: 100vw; border-top: 1px dashed var(--vscode-charts-purple); top: 0; height: 0; z-index: 99; pointer-events: none;`\n\t\t};\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tbefore: beforeDecoration,\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${cssContent}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top; top: ${topOffset};`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if (next.kind === 'editDelete') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(next.range.start[0], next.range.start[1]),\n\t\t\tnew vscode.Position(next.range.end[0], next.range.end[1])\n\t\t);\n\t\tdecorationDeleteType = vscode.window.createTextEditorDecorationType({\n\t\t\tbackgroundColor: 'rgba(255, 60, 60, 0.18)',\n\t\t\tborder: '1px solid rgba(255, 60, 60, 0.35)',\n\t\t\ttextDecoration: 'line-through'\n\t\t});\n\t\teditor.setDecorations(decorationDeleteType, [{ range }]);\n\t} else if (next.kind === 'editReplace') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(next.range.start[0], next.range.start[1]),\n\t\t\tnew vscode.Position(next.range.end[0], next.range.end[1])\n\t\t);\n\t\tdecorationReplaceType = vscode.window.createTextEditorDecorationType({\n\t\t\tbackgroundColor: 'rgba(255,165,0,0.15)',\n\t\t\tborder: '1px dashed rgba(255,165,0,0.45)',\n\t\t\tcolor: new vscode.ThemeColor('disabledForeground'),\n\t\t\ttextDecoration: 'line-through'\n\t\t});\n\t\teditor.setDecorations(decorationReplaceType, [{ range }]);\n\n\t\tconst fullBlock = next.text;\n\t\t\n\t\tconst cssContent = fullBlock\n\t\t\t.replace(/""/g, '\\""')\n\t\t\t.replace(/\r?\n/g, '\\A '); \n\n\t\tconst anchorLine = range.start.line;\n\t\tconst anchorPos = new vscode.Position(anchorLine, Number.MAX_VALUE);\n\t\tconst margin = getDynamicMargin(editor, anchorLine, fullBlock);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: 
pre; content: ""${cssContent}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if (next.kind === 'openFile') {\n\t\tconst cursor = editor.selection.active;\n\t\tconst isVisible = editor.visibleRanges.some(r => r.contains(cursor));\n\t\t\n\t\tlet anchorPos = new vscode.Position(cursor.line, Number.MAX_VALUE);\n\t\t\n\t\tif (!isVisible && editor.visibleRanges.length > 0) {\n\t\t\tconst firstVisible = editor.visibleRanges[0].start;\n\t\t\tconst lastVisible = editor.visibleRanges[editor.visibleRanges.length - 1].end;\n\t\t\t\n\t\t\tif (cursor.isBefore(firstVisible)) {\n\t\t\t\tanchorPos = new vscode.Position(firstVisible.line, Number.MAX_VALUE);\n\t\t\t} else {\n\t\t\t\tanchorPos = new vscode.Position(lastVisible.line, Number.MAX_VALUE);\n\t\t\t}\n\t\t}\n\t\t\n\t\tconst fileName = next.filePath.split(/[/\\]/).pop() || next.filePath;\n\t\tconst targetLine = next.selections?.[0]?.start[0];\n\t\tconst label = targetLine !== undefined\n\t\t\t? `↳ Switch to file: ${fileName}:${targetLine + 1}` // Display as 1-based\n\t\t\t: `↳ Switch to file: ${fileName}`;\n\t\tconst margin = getDynamicMargin(editor, anchorPos.line, label);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${label.replace(/""/g, '\\""')}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t}\n\n\tpreviewVisible = true;\n\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, true);\n\tcurrentAction = action;\n}\n\nfunction hidePreviewUI(suppress?: boolean): void {\n\tdisposePreviewDecorations();\n\tpreviewVisible = false;\n\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, false);\n\tif (suppress) {\n\t\tsuppressAutoPreview = true;\n\t}\n}\n\n/**\n * Schedule a model preview refresh, coalescing rapid editor events and\n * throttling how often we actually talk to the model.\n */\nfunction schedulePredictionRefresh(debounce: boolean, userRequested: boolean): void {\n\tif (!suggestionsEnabled) {\n\t\treturn;\n\t}\n\tif (!userRequested && suppressAutoPreview) {\n\t\treturn;\n\t}\n\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) {\n\t\thidePreviewUI();\n\t\treturn;\n\t}\n\n\tif (!userRequested) {\n\t\tif (!vscode.window.state.focused) {\n\t\t\thidePreviewUI();\n\t\t\treturn;\n\t\t}\n\t\tif (editor.document.getText().length === 0) {\n\t\t\thidePreviewUI();\n\t\t\treturn;\n\t\t}\n\t}\n\n\tconst now = Date.now();\n\tconst id = ++nextQueuedPredictionId;\n\n\tlet delay = 0;\n\tif (debounce) {\n\t\tdelay = Math.max(delay, PREDICTION_DEBOUNCE_MS);\n\t}\n\tif (lastPredictionTimestamp !== null && lastPredictionTimestamp !== undefined) {\n\t\tconst elapsed = now - 
lastPredictionTimestamp;\n\t\tif (elapsed < PREDICTION_THROTTLE_MS) {\n\t\t\tdelay = Math.max(delay, PREDICTION_THROTTLE_MS - elapsed);\n\t\t}\n\t}\n\n\tconst timer = setTimeout(() => {\n\t\tif (cancelledPredictionIds.has(id)) {\n\t\t\tcancelledPredictionIds.delete(id);\n\t\t\treturn;\n\t\t}\n\n\t\tlastPredictionTimestamp = Date.now();\n\t\tpendingPredictions = pendingPredictions.filter(p => p.id !== id);\n\n\t\tvoid autoShowNextAction();\n\t}, delay);\n\n\tpendingPredictions.push({ id, timer });\n\n\tif (pendingPredictions.length > 2) {\n\t\tconst oldest = pendingPredictions.shift();\n\t\tif (oldest) {\n\t\t\tcancelledPredictionIds.add(oldest.id);\n\t\t\tclearTimeout(oldest.timer);\n\t\t}\n\t}\n}\n\nasync function autoShowNextAction(): Promise<void> {\n\tif (suppressAutoPreview) { return; }\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\ttry {\n\t\tcurrentAbortController?.abort();\n\t\tconst controller = new AbortController();\n\t\tcurrentAbortController = controller;\n\t\tconst requestId = ++latestRequestId;\n\t\tconst next = await requestModelActions(editor, controller.signal);\n\t\tif (requestId !== latestRequestId) { return; }\n\t\tif (next) { showPreviewUI(next); } else { hidePreviewUI(); }\n\t} catch (err) {\n\t\tconst e = err as any;\n\t\tconst isAbort = e?.name === 'AbortError' || /aborted/i.test(String(e?.message ?? ''));\n\t\tif (isAbort) { return; }\n\t\thidePreviewUI();\n\t}\n}\n\n// -------------------- SGLang Client (simple test) --------------------\nasync function callSGLangChat(): Promise<void> {\n\tconst cfg = getConfig();\n\tconst headers: any = {\n\t\t'Content-Type': 'application/json'\n\t};\n\n\n\tconst requestBody: any = {\n\t\tmodel: cfg.modelName,\n\t\tmessages: [\n\t\t\t{ role: 'user', content: 'What is the capital of France?' }\n\t\t]\n\t};\n\trequestBody.temperature = 0.7;\n\trequestBody.top_p = 0.8;\n\trequestBody.top_k = 20;\n\trequestBody.min_p = 0;\n\trequestBody.chat_template_kwargs = {\n\t\tenable_thinking: false\n\t};\n\tconst postData = JSON.stringify(requestBody);\n\theaders['Content-Length'] = Buffer.byteLength(postData);\n\n\tconst options = {\n\t\thostname: cfg.hostname,\n\t\tport: cfg.port,\n\t\tpath: cfg.basePath,\n\t\tmethod: 'POST',\n\t\theaders\n\t};\n\n\n\ttry {\n\t\tconst json = await new Promise<any>((resolve, reject) => {\n\t\t\tconst req = http.request(options, (res: http.IncomingMessage) => {\n\t\t\t\tlet data = '';\n\t\t\t\tres.on('data', (chunk: Buffer) => {\n\t\t\t\t\tdata += chunk.toString();\n\t\t\t\t});\n\t\t\t\tres.on('end', () => {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tresolve(JSON.parse(data));\n\t\t\t\t\t} catch (err) {\n\t\t\t\t\t\treject(new Error(`Failed to parse response: ${err instanceof Error ? err.message : String(err)}`));\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\n\t\t\treq.on('error', (err: Error) => {\n\t\t\t\treject(err);\n\t\t\t});\n\n\t\t\treq.write(postData);\n\t\t\treq.end();\n\t\t});\n\n\t\tvscode.window.showInformationMessage(`Response: ${JSON.stringify(json, null, 2)}`);\n\t} catch (err) {\n\t\tconst errorMessage = err instanceof Error ? 
err.message : String(err);\n\t\tvscode.window.showErrorMessage(`Request failed: ${errorMessage}`);\n\t}\n}\n\n// -------------------- Model-planned Actions --------------------\nasync function requestModelActions(editor: vscode.TextEditor, signal?: AbortSignal): Promise<Action> {\n\tconst cfg = getConfig();\n\tconst headers: any = {\n\t\t'Content-Type': 'application/json'\n\t};\n\n\tconst doc = editor.document;\n\n\t// FIXME (f.srambical): This should be the system prompt that was used during serialization.\n\tconst systemPrompt = [\n\t\t'You are a helpful assistant that interacts with a computer shell to solve programming tasks.',\n\t\t'Your goal is to predict the next bash command a developer would most likely execute, given their editing and navigation history.',\n\t\t'',\n\t\t'=== CONVERSATION FORMAT ===',\n\t\t'The conversation history alternates between:',\n\t\t'- Assistant messages: bash commands in fenced code blocks',\n\t\t'- User messages: command output wrapped in <stdout>...</stdout> tags',\n\t\t'',\n\t\t'File contents are displayed with 6-character right-aligned line numbers followed by a tab, e.g.:',\n\t\t' 1\tfirst line',\n\t\t' 2\tsecond line',\n\t\t'',\n\t\t'File content is typically shown in viewports of ~20 lines around the area of interest.',\n\t\t'',\n\t\t'=== RESPONSE FORMAT ===',\n\t\t'Your response must contain exactly ONE bash code block with one command or two commands connected with &&.',\n\t\t'',\n\t\t'<format_example>',\n\t\t'```bash',\n\t\t'your_command_here',\n\t\t'```',\n\t\t'</format_example>',\n\t\t'',\n\t\t'Failure to follow these rules will cause your response to be rejected.',\n\t\t'',\n\t\t'=== EDIT COMMAND FORMAT (IMPORTANT) ===',\n\t\t'When you want to EDIT a file, you MUST encode the edit using line-based sed commands in ONE of the following forms,',\n\t\t'and you MUST NOT use substitution commands like ""Ns/old/new/g"".',\n\t\t'',\n\t\t'Assume all line numbers are 1-based and paths are absolute.',\n\t\t'Allowed edit encodings (choose exactly one per response):',\n\t\t'',\n\t\t'1) Replace a contiguous block of lines:',\n\t\t"" sed -i 'START,ENDc\\"",\n\t\t'NEW_LINE_1',\n\t\t'NEW_LINE_2',\n\t\t""..."",\n\t\t""' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'2) Delete a contiguous block of lines:',\n\t\t"" sed -i 'START,ENDd' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'3) Insert new lines BEFORE a given line:',\n\t\t"" sed -i 'STARTi\\"",\n\t\t'NEW_LINE_1',\n\t\t'NEW_LINE_2',\n\t\t""..."",\n\t\t""' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'4) Append new lines at the END of the file:',\n\t\t"" sed -i '$a\\"",\n\t\t'NEW_LINE_1',\n\t\t'NEW_LINE_2',\n\t\t""..."",\n\t\t""' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'Where VSTART and VEND specify a small viewport around the edited region.',\n\t\t'',\n\t\t'Do NOT emit commands like ""3s/print/print()/g"" or any other ""s/old/new/"" style sed substitution; instead,',\n\t\t'always rewrite the affected lines using one of the line-based forms above.',\n\t\t'',\n\t\t'When you are NOT editing files (e.g., running tests, git commands, tools, etc.), you may emit arbitrary bash commands.'\n\t].join('\n');\n\n\tconst accumulatedMessages = conversationManager.finalizeForModel();\n\t\n\tlet conversationMessages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }> = [\n\t\t{ role: 'system', content: systemPrompt },\n\t];\n\t\n\tfor 
(const msg of accumulatedMessages) {\n\t\tconst role = msg.from === 'User' ? 'user' : 'assistant';\n\t\tconversationMessages.push({ role, content: msg.value });\n\t}\n\n\tconversationMessages = truncateToContextLimit(conversationMessages, cfg.maxContextTokens);\n\n\tconst requestBody: any = {\n\t\tmodel: cfg.modelName,\n\t\tmessages: conversationMessages\n\t};\n\trequestBody.temperature = 0.7;\n\trequestBody.top_p = 0.8;\n\trequestBody.top_k = 20;\n\trequestBody.min_p = 0;\n\trequestBody.logprobs = true;\n\trequestBody.chat_template_kwargs = {\n\t\tenable_thinking: false\n\t};\n\n\tconst postData = JSON.stringify(requestBody);\n\theaders['Content-Length'] = Buffer.byteLength(postData);\n\n\tconst options: any = {\n\t\thostname: cfg.hostname,\n\t\tport: cfg.port,\n\t\tpath: cfg.basePath,\n\t\tmethod: 'POST',\n\t\theaders\n\t};\n\tif (signal) {\n\t\toptions.signal = signal;\n\t}\n\n\tconst json = await new Promise<any>((resolve, reject) => {\n\t\tconst req = http.request(options, (res: http.IncomingMessage) => {\n\t\t\tlet data = '';\n\t\t\tres.on('data', (chunk: Buffer) => { data += chunk.toString(); });\n\t\t\tres.on('end', () => {\n\t\t\t\ttry {\n\t\t\t\t\tresolve(JSON.parse(data));\n\t\t\t\t} catch (err) {\n\t\t\t\t\treject(new Error(`Failed to parse response: ${err instanceof Error ? err.message : String(err)}`));\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t\treq.on('error', (err: Error) => reject(err));\n\t\treq.write(postData);\n\t\treq.end();\n\t});\n\n\tconst avgLogprob = calculateAverageLogprob(json);\n\tif (avgLogprob < cfg.minAvgLogprob) {\n\t\treturn undefined as any; // Low confidence, silently skip suggestion\n\t}\n\n\tconst content = extractChatContent(json);\n\tif (typeof content !== 'string' || content.trim().length === 0) {\n\t\tthrow new Error('Empty model content');\n\t}\n\tconst action = parseAction(content, doc);\n\tif (!action) {\n\t\tthrow new Error('No valid action parsed from model output');\n\t}\n\n\tmarkPendingAsIgnored();\n\n\tcreatePendingPreferenceSample(\n\t\tconversationMessages,\n\t\tcontent,\n\t\taction,\n\t\tavgLogprob,\n\t\tcfg.modelName\n\t);\n\n\treturn action;\n}\n\nfunction extractChatContent(json: any): string | undefined {\n\ttry {\n\t\tif (json && Array.isArray(json.choices) && json.choices[0]) {\n\t\t\tconst choice = json.choices[0];\n\t\t\tif (choice.message && typeof choice.message.content === 'string') {\n\t\t\t\treturn choice.message.content;\n\t\t\t}\n\t\t\tif (typeof choice.text === 'string') {\n\t\t\t\treturn choice.text;\n\t\t\t}\n\t\t}\n\t\treturn undefined;\n\t} catch {\n\t\treturn undefined;\n\t}\n}\n\n/**\n * Calculate average logprob per token from the API response.\n * Returns the mean of logprobs across all tokens (negative value, closer to 0 = more confident).\n * Returns -Infinity if logprobs are not available.\n */\nfunction calculateAverageLogprob(json: any): number {\n\tconst logprobs = json.choices[0]?.logprobs;\n\tconst sum = logprobs.content.reduce((s: number, t: any) => s + t.logprob, 0);\n\treturn sum / logprobs.content.length;\n}\n\nfunction parseAction(raw: string, doc?: vscode.TextDocument): Action | undefined {\n\tconst command = extractBashCommand(raw);\n\tif (!command) {\n\t\treturn undefined;\n\t}\n\tconst normalized = command.replace(/<think>[\s\S]*?<\/think>/gi, '').trim();\n\tif (!normalized) {\n\t\treturn undefined;\n\t}\n\tif (doc) {\n\t\tconst editAction = parseEditFromSedCommand(normalized, doc);\n\t\tif (editAction) {\n\t\t\treturn editAction;\n\t\t}\n\t\tconst viewportAction = parseViewportFromCatCommand(normalized, 
doc);\n\t\tif (viewportAction) {\n\t\t\treturn viewportAction;\n\t\t}\n\t}\n\treturn { kind: 'terminalSendText', text: normalized };\n}\n\n/**\n * Parse a sed-based edit command of the form emitted by the NeMo serializer into a VS Code edit action.\n *\n * Supported patterns (1-based line numbers, mirroring serialization_utils.py):\n * sed -i 'START,ENDc\n<replacement...>' <file> -> editReplace\n * sed -i 'START,ENDd' <file> -> editDelete\n * sed -i 'STARTi\n<insert...>' <file> -> editInsert (before START)\n * sed -i '$a\n<append...>' <file> -> editInsert (append at EOF)\n *\n * If the command does not match these patterns, returns undefined.\n */\nfunction parseEditFromSedCommand(command: string, doc: vscode.TextDocument): Action | undefined {\n\t// Only consider the first command before && / ||, since cat -n etc. are for viewport only.\n\tconst main = command.split(/&&|\|\|/)[0]?.trim() ?? '';\n\tif (!main) {\n\t\treturn undefined;\n\t}\n\n\t// Match: sed -i '<script>' <file>\n\tconst sedMatch = main.match(/sed\s+-i\s+'([\s\S]*?)'\s+([^\s&|]+)\s*$/);\n\tif (!sedMatch) {\n\t\treturn undefined;\n\t}\n\tconst script = sedMatch[1] ?? '';\n\tconst targetFile = sedMatch[2] ?? '';\n\tconst activePath = doc.uri.fsPath;\n\tif (targetFile !== activePath) {\n\t\treturn undefined;\n\t}\n\n\t// Delete: ""START,ENDd""\n\tconst deleteMatch = script.match(/^(\d+),(\d+)d$/);\n\tif (deleteMatch) {\n\t\tconst startLine1 = Number(deleteMatch[1]);\n\t\tconst endLine1 = Number(deleteMatch[2]);\n\t\tif (!Number.isFinite(startLine1) || !Number.isFinite(endLine1)) {\n\t\t\treturn undefined;\n\t\t}\n\t\tconst startLine0 = Math.max(0, startLine1 - 1);\n\t\tconst endLine0 = Math.max(0, endLine1 - 1);\n\n\t\tlet endPosLine = endLine0 + 1;\n\t\tlet endPosChar = 0;\n\t\tif (endPosLine >= doc.lineCount) {\n\t\t\tendPosLine = doc.lineCount - 1;\n\t\t\tendPosChar = doc.lineAt(endPosLine).range.end.character;\n\t\t}\n\t\treturn {\n\t\t\tkind: 'editDelete',\n\t\t\trange: {\n\t\t\t\tstart: [startLine0, 0],\n\t\t\t\tend: [endPosLine, endPosChar],\n\t\t\t},\n\t\t};\n\t}\n\n\t// Replace: ""START,ENDc\newline<payload...>""\n\tconst replaceMatch = script.match(/^(\d+),(\d+)c\\\n([\s\S]*)$/);\n\tif (replaceMatch) {\n\t\tconst startLine1 = Number(replaceMatch[1]);\n\t\tconst endLine1 = Number(replaceMatch[2]);\n\t\tlet payload = replaceMatch[3] ?? '';\n\t\tif (!Number.isFinite(startLine1) || !Number.isFinite(endLine1)) {\n\t\t\treturn undefined;\n\t\t}\n\t\tpayload = payload.replace(/'\""'\""'/g, ""'"");\n\t\t// Convert escape sequences to actual characters\n\t\tpayload = payload.replace(/\\n/g, '\n').replace(/\\t/g, '\t').replace(/\\'/g, ""'"").replace(/\\\\/g, '\\');\n\t\tconst startLine0 = Math.max(0, startLine1 - 1);\n\t\tconst endLine0 = Math.max(0, endLine1 - 1);\n\t\tconst startPos: [number, number] = [startLine0, 0];\n\n\t\tlet endPosLine = endLine0 + 1;\n\t\tlet endPosChar = 0;\n\t\tif (endPosLine >= doc.lineCount) {\n\t\t\tendPosLine = doc.lineCount - 1;\n\t\t\tendPosChar = doc.lineAt(endPosLine).range.end.character;\n\t\t}\n\n\t\tconst text = payload.endsWith('\n') ? payload : payload + '\n';\n\t\treturn {\n\t\t\tkind: 'editReplace',\n\t\t\trange: { start: startPos, end: [endPosLine, endPosChar] },\n\t\t\ttext,\n\t\t};\n\t}\n\n\tconst insertMatch = script.match(/^(\d+)i\\\n([\s\S]*)$/);\n\tif (insertMatch) {\n\t\tconst line1 = Number(insertMatch[1]);\n\t\tlet payload = insertMatch[2] ?? 
'';\n\t\tif (!Number.isFinite(line1)) {\n\t\t\treturn undefined;\n\t\t}\n\t\tpayload = payload.replace(/'\""'\""'/g, ""'"");\n\t\t// Convert escape sequences to actual characters\n\t\tpayload = payload.replace(/\\n/g, '\n').replace(/\\t/g, '\t').replace(/\\'/g, ""'"").replace(/\\\\/g, '\\');\n\t\tconst insertLine0 = Math.max(0, line1 - 1);\n\t\tconst position: [number, number] = [insertLine0, 0];\n\t\tconst text = payload.endsWith('\n') ? payload : payload + '\n';\n\t\treturn {\n\t\t\tkind: 'editInsert',\n\t\t\tposition,\n\t\t\ttext,\n\t\t};\n\t}\n\n\tconst appendMatch = script.match(/^\$a\\\n([\s\S]*)$/);\n\tif (appendMatch) {\n\t\tlet payload = appendMatch[1] ?? '';\n\t\tpayload = payload.replace(/'\""'\""'/g, ""'"");\n\t\t// Convert escape sequences to actual characters\n\t\tpayload = payload.replace(/\\n/g, '\n').replace(/\\t/g, '\t').replace(/\\'/g, ""'"").replace(/\\\\/g, '\\');\n\t\tconst insertLine0 = doc.lineCount;\n\t\tconst position: [number, number] = [insertLine0, 0];\n\t\tconst needsLeadingNewline = doc.lineCount > 0;\n\t\tconst base = payload.endsWith('\n') ? payload : payload + '\n';\n\t\tconst text = needsLeadingNewline ? '\n' + base : base;\n\t\treturn {\n\t\t\tkind: 'editInsert',\n\t\t\tposition,\n\t\t\ttext,\n\t\t};\n\t}\n\n\treturn undefined;\n}\n\n/**\n * Parse viewport / selection commands of the form:\n * cat -n <file> | sed -n 'START,ENDp'\n *\n * into a lightweight VS Code selection move (setSelections). This mirrors how\n * selection and viewport events are serialized in serialization_utils.py.\n */\nfunction parseViewportFromCatCommand(command: string, doc: vscode.TextDocument): Action | undefined {\n\tconst main = command.split(/&&|\|\|/)[0]?.trim() ?? '';\n\tif (!main) {\n\t\treturn undefined;\n\t}\n\n\t// Simple file-open: cat -n <file>\n\tconst simpleCatMatch = main.match(/^cat\s+-n\s+([^\s|]+)\s*$/);\n\tif (simpleCatMatch) {\n\t\tconst targetFile = simpleCatMatch[1] ?? '';\n\t\tif (targetFile !== doc.uri.fsPath) {\n\t\t\treturn { kind: 'openFile', filePath: targetFile };\n\t\t}\n\t\t// Ensure the active document is visible; rely on existing editor to handle this.\n\t\treturn { kind: 'showTextDocument' };\n\t}\n\n\t// Viewport slice: cat -n <file> | sed -n 'START,ENDp'\n\tconst viewportMatch = main.match(/^cat\s+-n\s+([^\s|]+)\s*\|\s*sed\s+-n\s+'(\d+),(\d+)p'\s*$/);\n\tif (!viewportMatch) {\n\t\treturn undefined;\n\t}\n\n\tconst targetFile = viewportMatch[1] ?? '';\n\tconst startStr = viewportMatch[2] ?? '';\n\tconst endStr = viewportMatch[3] ?? 
'';\n\n\tconst startLine1 = Number(startStr);\n\tconst endLine1 = Number(endStr);\n\n\t// Place the cursor in the middle of the viewport (1-based to 0-based).\n\tconst center1 = Math.floor((startLine1 + endLine1) / 2);\n\tconst center0 = Math.max(0, center1 - 1);\n\n\tif (targetFile !== doc.uri.fsPath) {\n\t\treturn {\n\t\t\tkind: 'openFile',\n\t\t\tfilePath: targetFile,\n\t\t\tselections: [{ start: [center0, 0], end: [center0, 0] }]\n\t\t};\n\t}\n\tconst lastLine = Math.max(0, doc.lineCount - 1);\n\tconst line = Math.min(center0, lastLine);\n\n\treturn {\n\t\tkind: 'setSelections',\n\t\tselections: [\n\t\t\t{\n\t\t\t\tstart: [line, 0],\n\t\t\t\tend: [line, 0],\n\t\t\t},\n\t\t],\n\t};\n}\n\nfunction extractBashCommand(raw: string): string | undefined {\n\tif (!raw) {\n\t\treturn undefined;\n\t}\n\tconst trimmed = raw.trim();\n\tconst fenceMatch = trimmed.match(/```(?:bash)?\s*([\s\S]*?)```/i);\n\tif (fenceMatch && fenceMatch[1]) {\n\t\treturn fenceMatch[1];\n\t}\n\t// Fallback: treat entire response as the command\n\treturn trimmed.length > 0 ? trimmed : undefined;\n}",typescript,tab
|
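The extension source in the row above defines a line-based sed protocol for model-proposed edits (`START,ENDc\`, `START,ENDd`, `STARTi\`, `$a\`) and parses it back into editor actions. A rough Python sketch of how a `c\` (replace-block) command could be parsed and applied to in-memory file text, assuming 1-based inclusive line numbers as described in the recorded system prompt; the names and regex here are illustrative, not the extension's parser:

```python
# Sketch: parse and apply a `sed -i 'START,ENDc\ ...'` replace-block command
# to in-memory text. Assumes 1-based, inclusive line numbers; no error handling.
import re

def apply_sed_replace(command: str, text: str) -> str | None:
    m = re.match(r"sed\s+-i\s+'(\d+),(\d+)c\\\n([\s\S]*)'\s+\S+", command)
    if not m:
        return None
    start, end, payload = int(m.group(1)), int(m.group(2)), m.group(3)
    lines = text.splitlines()
    lines[start - 1:end] = payload.splitlines()   # replace the block in place
    return "\n".join(lines) + "\n"

original = "a\nb\nc\n"
cmd = "sed -i '2,3c\\\nB\nC' /tmp/example.txt"    # hypothetical path
print(apply_sed_replace(cmd, original))           # lines: a, B, C
```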
| 3 |
+
2,242,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"6:43:22 PM [info] Activating crowd-code\n6:43:22 PM [info] Recording started\n6:43:22 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,300,"crowd-pilot-extension/src/extension.ts",0,0,"",typescript,tab
|
| 5 |
+
4,1607,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 6 |
+
5,3711,"TERMINAL",0,0,"",,terminal_focus
|
| 7 |
+
6,3713,"crowd-pilot-extension/src/extension.ts",0,0,"",typescript,tab
|
| 8 |
+
7,8154,"TERMINAL",0,0,"ls -lh",,terminal_command
|
| 9 |
+
8,8160,"TERMINAL",0,0,"]633;Ctotal 4.8M\r\n-rw-r--r-- 1 franz.srambical franz.srambical 238 Nov 6 18:09 CHANGELOG.md\r\n-rw-rw---- 1 franz.srambical franz.srambical 2.1M Dec 12 18:42 crowd-pilot-0.0.1.vsix\r\n-rw-rw---- 1 franz.srambical franz.srambical 200K Dec 11 12:45 crowd-pilot-old.vsix\r\ndrwxrwx--- 4 franz.srambical franz.srambical 206 Dec 12 17:59 [0m[01;34mcrowd-pilot-serializer[0m\r\n-rw-rw---- 1 franz.srambical franz.srambical 1022K Dec 12 18:42 [01;31mcrowd-pilot-serializer-0.1.0.tgz[0m\r\n-rw-r--r-- 1 franz.srambical franz.srambical 629 Nov 6 18:09 eslint.config.mjs\r\n-rw-rw---- 1 franz.srambical franz.srambical 563 Dec 11 10:08 LICENSE\r\ndrwxrwx--- 184 franz.srambical franz.srambical 5.2K Dec 12 18:42 [01;34mnode_modules[0m\r\ndrwxrwx--- 3 franz.srambical franz.srambical 86 Dec 12 18:41 [01;34mout[0m\r\n-rw-rw---- 1 franz.srambical franz.srambical 4.3K Dec 12 18:41 package.json\r\n-rw-rw---- 1 franz.srambical franz.srambical 109K Dec 12 18:42 package-lock.json\r\n-rw-r--r-- 1 franz.srambical franz.srambical 77 Nov 6 18:09 README.md\r\ndrwxr-xr-x 3 franz.srambical franz.srambical 52 Dec 11 17:54 [01;34msrc[0m\r\n-rw-r--r-- 1 franz.srambical franz.srambical 536 Nov 6 18:09 tsconfig.json\r\n-rw-r--r-- 1 franz.srambical franz.srambical 3.1K Nov 6 18:09 vsc-extension-quickstart.md\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 10 |
+
9,33643,"TERMINAL",0,0,"npm run clean",,terminal_command
|
| 11 |
+
10,35471,"TERMINAL",0,0,"ls -lh",,terminal_command
|
| 12 |
+
11,35487,"TERMINAL",0,0,"]633;Ctotal 4.8M\r\n-rw-r--r-- 1 franz.srambical franz.srambical 238 Nov 6 18:09 CHANGELOG.md\r\n-rw-rw---- 1 franz.srambical franz.srambical 2.1M Dec 12 18:42 crowd-pilot-0.0.1.vsix\r\n-rw-rw---- 1 franz.srambical franz.srambical 200K Dec 11 12:45 crowd-pilot-old.vsix\r\ndrwxrwx--- 4 franz.srambical franz.srambical 206 Dec 12 17:59 [0m[01;34mcrowd-pilot-serializer[0m\r\n-rw-rw---- 1 franz.srambical franz.srambical 1022K Dec 12 18:42 [01;31mcrowd-pilot-serializer-0.1.0.tgz[0m\r\n-rw-r--r-- 1 franz.srambical franz.srambical 629 Nov 6 18:09 eslint.config.mjs\r\n-rw-rw---- 1 franz.srambical franz.srambical 563 Dec 11 10:08 LICENSE\r\ndrwxrwx--- 184 franz.srambical franz.srambical 5.2K Dec 12 18:42 [01;34mnode_modules[0m\r\ndrwxrwx--- 3 franz.srambical franz.srambical 86 Dec 12 18:41 [01;34mout[0m\r\n-rw-rw---- 1 franz.srambical franz.srambical 4.3K Dec 12 18:41 package.json\r\n-rw-rw---- 1 franz.srambical franz.srambical 109K Dec 12 18:42 package-lock.json\r\n-rw-r--r-- 1 franz.srambical franz.srambical 77 Nov 6 18:09 README.md\r\ndrwxr-xr-x 3 franz.srambical franz.srambical 52 Dec 11 17:54 [01;34msrc[0m\r\n-rw-r--r-- 1 franz.srambical franz.srambical 536 Nov 6 18:09 tsconfig.json\r\n-rw-r--r-- 1 franz.srambical franz.srambical 3.1K Nov 6 18:09 vsc-extension-quickstart.md\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 13 |
+
12,38539,"TERMINAL",0,0,"module load nodejs",,terminal_command
|
| 14 |
+
13,38590,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 15 |
+
14,38762,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 16 |
+
15,40116,"TERMINAL",0,0,"npm run clean",,terminal_command
|
| 17 |
+
16,40168,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 18 |
+
17,41373,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 clean\r\n> rm -rf out *.tgz\r\n\r\n",,terminal_output
|
| 19 |
+
18,41396,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 20 |
+
19,43130,"TERMINAL",0,0,"ls -lh",,terminal_command
|
| 21 |
+
20,43145,"TERMINAL",0,0,"]633;Ctotal 3.7M\r\n-rw-r--r-- 1 franz.srambical franz.srambical 238 Nov 6 18:09 CHANGELOG.md\r\n-rw-rw---- 1 franz.srambical franz.srambical 2.1M Dec 12 18:42 crowd-pilot-0.0.1.vsix\r\n-rw-rw---- 1 franz.srambical franz.srambical 200K Dec 11 12:45 crowd-pilot-old.vsix\r\ndrwxrwx--- 4 franz.srambical franz.srambical 206 Dec 12 17:59 [0m[01;34mcrowd-pilot-serializer[0m\r\n-rw-r--r-- 1 franz.srambical franz.srambical 629 Nov 6 18:09 eslint.config.mjs\r\n-rw-rw---- 1 franz.srambical franz.srambical 563 Dec 11 10:08 LICENSE\r\ndrwxrwx--- 184 franz.srambical franz.srambical 5.2K Dec 12 18:42 [01;34mnode_modules[0m\r\n-rw-rw---- 1 franz.srambical franz.srambical 4.3K Dec 12 18:41 package.json\r\n-rw-rw---- 1 franz.srambical franz.srambical 109K Dec 12 18:42 package-lock.json\r\n-rw-r--r-- 1 franz.srambical franz.srambical 77 Nov 6 18:09 README.md\r\ndrwxr-xr-x 3 franz.srambical franz.srambical 52 Dec 11 17:54 [01;34msrc[0m\r\n-rw-r--r-- 1 franz.srambical franz.srambical 536 Nov 6 18:09 tsconfig.json\r\n-rw-r--r-- 1 franz.srambical franz.srambical 3.1K Nov 6 18:09 vsc-extension-quickstart.md\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 22 |
+
21,62204,"crowd-pilot-extension/src/extension.ts",5653,0,"",typescript,selection_command
|
| 23 |
+
22,62606,"crowd-pilot-extension/src/extension.ts",5602,0,"",typescript,selection_command
|
| 24 |
+
23,65064,"crowd-pilot-extension/src/extension.ts",5653,0,"",typescript,selection_command
|
| 25 |
+
24,65162,"crowd-pilot-extension/src/extension.ts",5602,0,"",typescript,selection_command
|
| 26 |
+
25,65898,"crowd-pilot-extension/src/extension.ts",5556,0,"",typescript,selection_command
|
| 27 |
+
26,68795,"Untitled-1",0,0,"",plaintext,tab
|
| 28 |
+
27,69566,"Untitled-1",0,0,"""",plaintext,content
|
| 29 |
+
28,69567,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 30 |
+
29,69706,"Untitled-1",1,0,"""",plaintext,content
|
| 31 |
+
30,69707,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 32 |
+
31,69866,"Untitled-1",2,0,"""",plaintext,content
|
| 33 |
+
32,69867,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 34 |
+
33,71782,"Untitled-1",2,0,"",plaintext,selection_command
|
| 35 |
+
34,75140,"Untitled-1",0,3,"",plaintext,content
|
| 36 |
+
35,75146,"crowd-pilot-extension/src/extension.ts",0,0,"",typescript,tab
|
| 37 |
+
36,75728,"crowd-pilot-extension/src/extension.ts",5602,0,"",typescript,selection_command
|
| 38 |
+
37,76613,"crowd-pilot-extension/src/extension.ts",6162,0,"",typescript,selection_command
|
| 39 |
+
38,106470,"crowd-pilot-extension/.vscodeignore",0,0,"# Source files\nsrc/**\n**/*.ts\n!out/**\n\n# Development files\n.vscode/**\n.git/**\n.gitignore\n.gitmodules\neslint.config.mjs\ntsconfig.json\nvsc-extension-quickstart.md\n*.md\n!readme.md\n!changelog.md\n\n# Submodule source (native addon is in the tarball)\ncrowd-pilot-serializer/**\n\n# Test files\n**/*.test.js\n**/test/**\n\n# Misc\n.crowd-pilot-preferences.jsonl\n**/.DS_Store\n",ignore,tab
|
| 40 |
+
39,107153,"crowd-pilot-extension/.vscodeignore",360,0,"",ignore,selection_mouse
|
| 41 |
+
40,361987,"crowd-pilot-extension/.vscodeignore",346,0,"",ignore,selection_mouse
|
| 42 |
+
41,361995,"crowd-pilot-extension/.vscodeignore",345,0,"",ignore,selection_command
|
| 43 |
+
42,372094,"crowd-pilot-extension/.vscodeignore",270,0,"",ignore,selection_mouse
|
| 44 |
+
43,374769,"crowd-pilot-extension/.gitignore",0,0,"out/\nnode_modules/\n.*",ignore,tab
|
| 45 |
+
44,375741,"crowd-pilot-extension/.gitignore",21,0,"\n",ignore,content
|
| 46 |
+
45,376100,"crowd-pilot-extension/.gitignore",21,1,"",ignore,content
|
| 47 |
+
46,376374,"crowd-pilot-extension/.gitignore",21,0,"!",ignore,content
|
| 48 |
+
47,376374,"crowd-pilot-extension/.gitignore",22,0,"",ignore,selection_keyboard
|
| 49 |
+
48,376865,"crowd-pilot-extension/.gitignore",21,1,"",ignore,content
|
| 50 |
+
49,377042,"crowd-pilot-extension/.gitignore",21,0,"\n",ignore,content
|
| 51 |
+
50,377526,"crowd-pilot-extension/.gitignore",22,0,"\n",ignore,content
|
| 52 |
+
51,377902,"crowd-pilot-extension/.gitignore",23,0,"!",ignore,content
|
| 53 |
+
52,377902,"crowd-pilot-extension/.gitignore",24,0,"",ignore,selection_keyboard
|
| 54 |
+
53,378734,"crowd-pilot-extension/.gitignore",24,0,".",ignore,content
|
| 55 |
+
54,378735,"crowd-pilot-extension/.gitignore",25,0,"",ignore,selection_keyboard
|
| 56 |
+
55,378875,"crowd-pilot-extension/.gitignore",25,0,"v",ignore,content
|
| 57 |
+
56,378876,"crowd-pilot-extension/.gitignore",26,0,"",ignore,selection_keyboard
|
| 58 |
+
57,379669,"crowd-pilot-extension/.gitignore",26,0,"s",ignore,content
|
| 59 |
+
58,379669,"crowd-pilot-extension/.gitignore",27,0,"",ignore,selection_keyboard
|
| 60 |
+
59,379962,"crowd-pilot-extension/.gitignore",27,0,"c",ignore,content
|
| 61 |
+
60,379962,"crowd-pilot-extension/.gitignore",28,0,"",ignore,selection_keyboard
|
| 62 |
+
61,380125,"crowd-pilot-extension/.gitignore",28,0,"o",ignore,content
|
| 63 |
+
62,380125,"crowd-pilot-extension/.gitignore",29,0,"",ignore,selection_keyboard
|
| 64 |
+
63,380287,"crowd-pilot-extension/.gitignore",29,0,"d",ignore,content
|
| 65 |
+
64,380287,"crowd-pilot-extension/.gitignore",30,0,"",ignore,selection_keyboard
|
| 66 |
+
65,380386,"crowd-pilot-extension/.gitignore",30,0,"e",ignore,content
|
| 67 |
+
66,380386,"crowd-pilot-extension/.gitignore",31,0,"",ignore,selection_keyboard
|
| 68 |
+
67,380640,"crowd-pilot-extension/.gitignore",31,0,"i",ignore,content
|
| 69 |
+
68,380641,"crowd-pilot-extension/.gitignore",32,0,"",ignore,selection_keyboard
|
| 70 |
+
69,380751,"crowd-pilot-extension/.gitignore",32,0,"g",ignore,content
|
| 71 |
+
70,380751,"crowd-pilot-extension/.gitignore",33,0,"",ignore,selection_keyboard
|
| 72 |
+
71,382005,"crowd-pilot-extension/.gitignore",23,10,"!.vscodeignore\n",ignore,content
|
| 73 |
+
72,383160,"crowd-pilot-extension/.gitignore",37,1,"",ignore,content
|
| 74 |
+
73,383168,"crowd-pilot-extension/.gitignore",23,0,"",ignore,selection_command
|
| 75 |
+
74,388601,"crowd-pilot-extension/.gitignore",0,0,"",ignore,tab
|
| 76 |
+
75,388601,"crowd-pilot-extension/.gitignore",22,0,"",ignore,selection_command
|
| 77 |
+
76,391480,"crowd-pilot-extension/.vscodeignore",0,0,"",ignore,tab
|
| 78 |
+
77,404179,"crowd-pilot-extension/.vscodeignore",211,0,"",ignore,selection_mouse
|
| 79 |
+
78,407552,"crowd-pilot-extension/.vscodeignore",283,0,"",ignore,selection_mouse
|
| 80 |
+
79,407556,"crowd-pilot-extension/.vscodeignore",282,0,"",ignore,selection_command
|
| 81 |
+
80,410205,"TERMINAL",0,0,"bash",,terminal_focus
|
| 82 |
+
81,424697,"TERMINAL",0,0,"vsce package",,terminal_command
|
| 83 |
+
82,424750,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 84 |
+
83,426218,"TERMINAL",0,0,"Executing prepublish script 'npm run vscode:prepublish'...\r\n",,terminal_output
|
| 85 |
+
84,427353,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 vscode:prepublish\r\n> npm run compile\r\n\r\n",,terminal_output
|
| 86 |
+
85,428417,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 compile\r\n> tsc -p ./\r\n\r\n",,terminal_output
|
| 87 |
+
86,430213,"TERMINAL",0,0,"[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m New [31mmajor[39m version of npm available! [31m10.5.2[39m -> [32m11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Changelog: [36mhttps://github.com/npm/cli/releases/tag/v11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Run [32mnpm install -g npm@11.7.0[39m to update!\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m New [31mmajor[39m version of npm available! [31m10.5.2[39m -> [32m11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Changelog: [36mhttps://github.com/npm/cli/releases/tag/v11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Run [32mnpm install -g npm@11.7.0[39m to update!\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m",,terminal_output
|
| 88 |
+
87,432168,"TERMINAL",0,0,"[104m[30m INFO [39m[49m [1m[34mFiles included in the VSIX:[39m[22m\r\n[1m[34m[39m[22m[1mcrowd-pilot-0.0.1.vsix[22m\r\n├─ [Content_Types].xml \r\n├─ extension.vsixmanifest \r\n└─ [1mextension/[22m\r\n ├─ LICENSE.txt [90m[0.55 KB][39m\r\n ├─ package.json [90m[4.25 KB][39m\r\n ├─ readme.md \r\n ├─ [1mnode_modules/[22m\r\n │ └─ [1m@crowd-pilot/[22m\r\n │ └─ [1mserializer/[22m\r\n │ ├─ index.js [90m[9.6 KB][39m\r\n │ ├─ package.json [90m[0.92 KB][39m\r\n │ └─ serializer.linux-x64-gnu.node [31m[3.05 MB][39m\r\n └─ [1mout/[22m\r\n ├─ extension.js [90m[47.8 KB][39m\r\n ├─ extension.js.map [90m[40.04 KB][39m\r\n └─ [1mtest/[22m\r\n ├─ extension.test.js [90m[1.94 KB][39m\r\n └─ extension.test.js.map [90m[0.6 KB][39m\r\n\r\nThe file extension/node_modules/@crowd-pilot/serializer/serializer.linux-x64-gnu.node is [31mlarge[39m (3.05 MB)\r\n\r\n",,terminal_output
|
| 89 |
+
88,432880,"TERMINAL",0,0,"[42m[30m DONE [39m[49m Packaged: /fast/home/franz.srambical/crowd-pilot-extension/crowd-pilot-0.0.1.vsix [1m(12 files, 1.02 MB)[22m\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 90 |
+
89,441967,"TERMINAL",0,0,"bash",,terminal_focus
|
| 91 |
+
90,443063,"TERMINAL",0,0,"bash",,terminal_focus
|
| 92 |
+
91,444362,"TERMINAL",0,0,"bash",,terminal_focus
|
| 93 |
+
92,446546,"TERMINAL",0,0,"watch (crowd-pilot-extension)",,terminal_focus
|
| 94 |
+
93,447062,"TERMINAL",0,0,"bash",,terminal_focus
|
| 95 |
+
94,476758,"crowd-pilot-extension/package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""publisher"": ""p-doom"",\n ""version"": ""0.0.1"",\n ""repository"": {\n ""type"": ""git"",\n ""url"": ""https://github.com/p-doom/crowd-pilot-extension""\n },\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [\n ""onStartupFinished""\n ],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.toggleSuggestions"",\n ""title"": ""crowd-pilot: Toggle Tab Suggestions""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""title"": ""crowd-pilot: Hide Preview""\n },\n {\n ""command"": ""crowd-pilot.sglangTest"",\n ""title"": ""crowd-pilot: Test SGLang""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""title"": ""crowd-pilot: Model Plan & Run""\n },\n {\n ""command"": ""crowd-pilot.clearContext"",\n ""title"": ""crowd-pilot: Clear Context""\n },\n {\n ""command"": ""crowd-pilot.openPreferenceLog"",\n ""title"": ""crowd-pilot: Open Preference Log""\n }\n ],\n ""configuration"": {\n ""title"": ""crowd-pilot"",\n ""properties"": {\n ""crowd-pilot.hostname"": {\n ""type"": ""string"",\n ""default"": ""hai002"",\n ""description"": ""Hostname of the SGLang server""\n },\n ""crowd-pilot.port"": {\n ""type"": ""number"",\n ""default"": 30000,\n ""description"": ""Port of the SGLang server""\n },\n ""crowd-pilot.basePath"": {\n ""type"": ""string"",\n ""default"": ""/v1/chat/completions"",\n ""description"": ""Base path for the SGLang API endpoint""\n },\n ""crowd-pilot.modelName"": {\n ""type"": ""string"",\n ""default"": ""qwen/qwen3-8b"",\n ""description"": ""Model name to use for completions""\n },\n ""crowd-pilot.minAvgLogprob"": {\n ""type"": ""number"",\n ""default"": -1.0,\n ""description"": ""Minimum average log-probability per token for displaying suggestions. Higher values (closer to 0) require more confidence. -1.0 ≈ perplexity 2.7""\n },\n ""crowd-pilot.maxContextTokens"": {\n ""type"": ""number"",\n ""default"": 120000,\n ""description"": ""Context length (in tokens). Older messages are truncated to fit. Set below your model's limit to leave room for the response.""\n },\n ""crowd-pilot.enablePreferenceLogging"": {\n ""type"": ""boolean"",\n ""default"": true,\n ""description"": ""Enable logging of accept/reject data for reward model training and RLHF/DPO""\n },\n ""crowd-pilot.preferenceLogPath"": {\n ""type"": ""string"",\n ""default"": """",\n ""description"": ""Custom path for the preference log file (JSONL format). 
If empty, uses workspace/.crowd-pilot-preferences.jsonl""\n }\n }\n },\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""inQuickOpen && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""key"": ""escape"",\n ""mac"": ""escape"",\n ""when"": ""crowdPilot.uiVisible""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test"",\n ""clean"": ""rm -rf out *.tgz"",\n ""clean:all"": ""rm -rf out *.tgz node_modules package-lock.json"",\n ""rebuild-serializer"": ""cd crowd-pilot-serializer/crates/napi && npm install && npm run build && npm pack && mv *.tgz ../../../ && cd ../../.. && npm install""\n },\n ""dependencies"": {\n ""@crowd-pilot/serializer"": ""file:./crowd-pilot-serializer-0.1.0.tgz""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.99.3"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 96 |
+
95,476759,"crowd-pilot-extension/package.json",3620,0,"",json,selection_command
|
| 97 |
+
96,520084,"crowd-pilot-extension/.vscodeignore",0,0,"# Source files\nsrc/**\n**/*.ts\n!out/**\n\n# Development files\n.vscode/**\n.git/**\n.gitignore\n.gitmodules\neslint.config.mjs\ntsconfig.json\nvsc-extension-quickstart.md\n*.md\n!readme.md\n!changelog.md\n\n# Submodule source (native addon is in the tarball)\ncrowd-pilot-serializer/**\n\n# Test files\n**/*.test.js\n**/test/**\n\n# Misc\n.crowd-pilot-preferences.jsonl\n**/.DS_Store\n",ignore,tab
|
| 98 |
+
97,521494,"crowd-pilot-extension/package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""publisher"": ""p-doom"",\n ""version"": ""0.0.1"",\n ""repository"": {\n ""type"": ""git"",\n ""url"": ""https://github.com/p-doom/crowd-pilot-extension""\n },\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [\n ""onStartupFinished""\n ],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.toggleSuggestions"",\n ""title"": ""crowd-pilot: Toggle Tab Suggestions""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""title"": ""crowd-pilot: Hide Preview""\n },\n {\n ""command"": ""crowd-pilot.sglangTest"",\n ""title"": ""crowd-pilot: Test SGLang""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""title"": ""crowd-pilot: Model Plan & Run""\n },\n {\n ""command"": ""crowd-pilot.clearContext"",\n ""title"": ""crowd-pilot: Clear Context""\n },\n {\n ""command"": ""crowd-pilot.openPreferenceLog"",\n ""title"": ""crowd-pilot: Open Preference Log""\n }\n ],\n ""configuration"": {\n ""title"": ""crowd-pilot"",\n ""properties"": {\n ""crowd-pilot.hostname"": {\n ""type"": ""string"",\n ""default"": ""hai002"",\n ""description"": ""Hostname of the SGLang server""\n },\n ""crowd-pilot.port"": {\n ""type"": ""number"",\n ""default"": 30000,\n ""description"": ""Port of the SGLang server""\n },\n ""crowd-pilot.basePath"": {\n ""type"": ""string"",\n ""default"": ""/v1/chat/completions"",\n ""description"": ""Base path for the SGLang API endpoint""\n },\n ""crowd-pilot.modelName"": {\n ""type"": ""string"",\n ""default"": ""qwen/qwen3-8b"",\n ""description"": ""Model name to use for completions""\n },\n ""crowd-pilot.minAvgLogprob"": {\n ""type"": ""number"",\n ""default"": -1.0,\n ""description"": ""Minimum average log-probability per token for displaying suggestions. Higher values (closer to 0) require more confidence. -1.0 ≈ perplexity 2.7""\n },\n ""crowd-pilot.maxContextTokens"": {\n ""type"": ""number"",\n ""default"": 120000,\n ""description"": ""Context length (in tokens). Older messages are truncated to fit. Set below your model's limit to leave room for the response.""\n },\n ""crowd-pilot.enablePreferenceLogging"": {\n ""type"": ""boolean"",\n ""default"": true,\n ""description"": ""Enable logging of accept/reject data for reward model training and RLHF/DPO""\n },\n ""crowd-pilot.preferenceLogPath"": {\n ""type"": ""string"",\n ""default"": """",\n ""description"": ""Custom path for the preference log file (JSONL format). 
If empty, uses workspace/.crowd-pilot-preferences.jsonl""\n }\n }\n },\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""inQuickOpen && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""key"": ""escape"",\n ""mac"": ""escape"",\n ""when"": ""crowdPilot.uiVisible""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test"",\n ""clean"": ""rm -rf out *.tgz"",\n ""clean:all"": ""rm -rf out *.tgz node_modules package-lock.json"",\n ""rebuild-serializer"": ""cd crowd-pilot-serializer/crates/napi && npm install && npm run build && npm pack && mv *.tgz ../../../ && cd ../../.. && npm install""\n },\n ""dependencies"": {\n ""@crowd-pilot/serializer"": ""file:./crowd-pilot-serializer-0.1.0.tgz""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.99.3"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 99 |
+
98,521495,"crowd-pilot-extension/package.json",3620,0,"",json,selection_command
|
| 100 |
+
99,545778,"crowd-pilot-extension/package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""publisher"": ""p-doom"",\n ""version"": ""0.0.1"",\n ""repository"": {\n ""type"": ""git"",\n ""url"": ""https://github.com/p-doom/crowd-pilot-extension""\n },\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [\n ""onStartupFinished""\n ],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.toggleSuggestions"",\n ""title"": ""crowd-pilot: Toggle Tab Suggestions""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""title"": ""crowd-pilot: Hide Preview""\n },\n {\n ""command"": ""crowd-pilot.sglangTest"",\n ""title"": ""crowd-pilot: Test SGLang""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""title"": ""crowd-pilot: Model Plan & Run""\n },\n {\n ""command"": ""crowd-pilot.clearContext"",\n ""title"": ""crowd-pilot: Clear Context""\n },\n {\n ""command"": ""crowd-pilot.openPreferenceLog"",\n ""title"": ""crowd-pilot: Open Preference Log""\n }\n ],\n ""configuration"": {\n ""title"": ""crowd-pilot"",\n ""properties"": {\n ""crowd-pilot.hostname"": {\n ""type"": ""string"",\n ""default"": ""hai002"",\n ""description"": ""Hostname of the SGLang server""\n },\n ""crowd-pilot.port"": {\n ""type"": ""number"",\n ""default"": 30000,\n ""description"": ""Port of the SGLang server""\n },\n ""crowd-pilot.basePath"": {\n ""type"": ""string"",\n ""default"": ""/v1/chat/completions"",\n ""description"": ""Base path for the SGLang API endpoint""\n },\n ""crowd-pilot.modelName"": {\n ""type"": ""string"",\n ""default"": ""qwen/qwen3-8b"",\n ""description"": ""Model name to use for completions""\n },\n ""crowd-pilot.minAvgLogprob"": {\n ""type"": ""number"",\n ""default"": -1.0,\n ""description"": ""Minimum average log-probability per token for displaying suggestions. Higher values (closer to 0) require more confidence. -1.0 ≈ perplexity 2.7""\n },\n ""crowd-pilot.maxContextTokens"": {\n ""type"": ""number"",\n ""default"": 120000,\n ""description"": ""Context length (in tokens). Older messages are truncated to fit. Set below your model's limit to leave room for the response.""\n },\n ""crowd-pilot.enablePreferenceLogging"": {\n ""type"": ""boolean"",\n ""default"": true,\n ""description"": ""Enable logging of accept/reject data for reward model training and RLHF/DPO""\n },\n ""crowd-pilot.preferenceLogPath"": {\n ""type"": ""string"",\n ""default"": """",\n ""description"": ""Custom path for the preference log file (JSONL format). 
If empty, uses workspace/.crowd-pilot-preferences.jsonl""\n }\n }\n },\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""inQuickOpen && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""key"": ""escape"",\n ""mac"": ""escape"",\n ""when"": ""crowdPilot.uiVisible""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test""\n },\n ""dependencies"": {\n ""@crowd-pilot/serializer"": ""file:./crowd-pilot-serializer/crates/napi""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.99.3"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 101 |
+
100,545778,"crowd-pilot-extension/package.json",3418,0,"",json,selection_mouse
|
| 102 |
+
101,545781,"crowd-pilot-extension/package.json",3417,0,"",json,selection_command
|
| 103 |
+
102,546265,"crowd-pilot-extension/package.json",0,0,"",json,tab
|
| 104 |
+
103,546267,"crowd-pilot-extension/.vscodeignore",0,0,"",ignore,tab
|
| 105 |
+
104,550431,"crowd-pilot-extension/.gitmodules",0,0,"[submodule ""crowd-pilot-serializer""]\n\tpath = crowd-pilot-serializer\n\turl = https://github.com/p-doom/crowd-pilot-serializer\n",properties,tab
|
| 106 |
+
105,551496,"crowd-pilot-extension/.gitmodules",124,0,"",properties,selection_mouse
|
| 107 |
+
106,564280,"TERMINAL",0,0,"git status",,terminal_command
|
| 108 |
+
107,564324,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 109 |
+
108,564400,"TERMINAL",0,0,"On branch rust-binding-for-serialization\r\nChanges to be committed:\r\n (use ""git restore --staged <file>..."" to unstage)\r\n\t[32mmodified: .gitignore[m\r\n\t[32mnew file: .vscodeignore[m\r\n\t[32mmodified: package.json[m\r\n\r\nChanges not staged for commit:\r\n (use ""git add <file>..."" to update what will be committed)\r\n (use ""git restore <file>..."" to discard changes in working directory)\r\n\t[31mmodified: crowd-pilot-serializer[m (new commits)\r\n\t[31mmodified: package-lock.json[m\r\n\r\nUntracked files:\r\n (use ""git add <file>..."" to include in what will be committed)\r\n\t[31mcrowd-pilot-0.0.1.vsix[m\r\n\t[31mcrowd-pilot-old.vsix[m\r\n\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 110 |
+
109,577813,"TERMINAL",0,0,"git add crowd-pilot-serializer/",,terminal_command
|
| 111 |
+
110,577856,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-372ef62d-2075-43ff-ac1a-e2025fd873c41751612450082-2025_07_04-09.01.47.125/source.csv
ADDED
|
@@ -0,0 +1,47 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,30,"tests/test_checkpointer.py",0,0,"import unittest\nimport tempfile\nimport os\nimport jax\nimport jax.numpy as jnp\nfrom flax.training import orbax_utils\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom pathlib import Path\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom flax.training.train_state import TrainState\nimport optax\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\n\nclass DistributedCheckpointerTest(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self._temp_dir_manager = tempfile.TemporaryDirectory()\n self.checkpoint_dir = Path(self._temp_dir_manager.name)\n self.addCleanup(self._temp_dir_manager.cleanup)\n\n # FIXME (f.srambical): If the tests pass, we should use the default model config instead\n self.model_kwargs = dict(\n in_dim=3,\n model_dim=8,\n latent_dim=4,\n num_latents=16,\n patch_size=2,\n num_blocks=1,\n num_heads=1,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.image_shape = (8, 8, 3)\n self.seq_len = 2\n self.batch_size = 2\n self.seed = 0\n\n def test_distributed_checkpointing(self):\n jax.distributed.initialize()\n num_devices = jax.device_count()\n self.assertGreater(num_devices, 0)\n\n model = TokenizerVQVAE(**self.model_kwargs)\n rng = jax.random.PRNGKey(self.seed)\n dummy_inputs = dict(\n videos=jnp.zeros((self.batch_size, self.seq_len, *self.image_shape), dtype=jnp.float32)\n )\n params = model.init(rng, dummy_inputs)\n\n tx = optax.adam(1e-3)\n state = TrainState.create(apply_fn=model.apply, params=params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n state = jax.device_put(state, replicated_sharding)\n\n ckpt = {""model"": state}\n orbax_checkpointer = PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n ckpt_path = str(self.checkpoint_dir / ""test_ckpt"")\n orbax_checkpointer.save(ckpt_path, ckpt, save_args=save_args)\n self.assertTrue(os.path.exists(ckpt_path))\n\n restore_target = {""model"": state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n restored = orbax_checkpointer.restore(ckpt_path, item=restore_target, restore_args=restore_args)\n # Compare parameters recursively, handling nested structure\n def compare_params(original, restored):\n if isinstance(original, dict):\n for k in original.keys():\n compare_params(original[k], restored[k])\n else:\n self.assertTrue(jnp.allclose(original, restored))\n \n compare_params(state.params, restored[""model""].params)\n\nif __name__ == ""__main__"":\n unittest.main()\n",python,tab
|
| 3 |
+
2,816,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:01:45 AM [info] Activating crowd-code\n9:01:47 AM [info] Recording started\n9:01:47 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,1444,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"9:01:47 AM [info] Git repository found\n9:01:47 AM [info] Git provider initialized successfully\n9:01:47 AM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,10577,"TERMINAL",0,0,"/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
|
| 6 |
+
5,10616,"TERMINAL",0,0,"]633;E;/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;0208a1bb-6063-4ca7-88cc-59ff15d627ad]633;C",,terminal_output
|
| 7 |
+
6,10697,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
|
| 8 |
+
7,27626,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69 --cpus-per-task=1 --ntasks-per-node=16",,terminal_command
|
| 9 |
+
8,27705,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69 --cpus-per-task=1 --ntasks-per-node=16;d83d794d-068d-45a7-86c8-da2446d84194]633;Csalloc: Pending job allocation 26666312\r\nsalloc: job 26666312 queued and waiting for resources\r\n",,terminal_output
|
| 10 |
+
9,33297,"TERMINAL",0,0,"bash",,terminal_focus
|
| 11 |
+
10,35170,"TERMINAL",0,0,"idle",,terminal_command
|
| 12 |
+
11,35173,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;idle;a8707c05-ae9b-4a50-91c9-fa9c06501dad]633;Cbash: idle: command not found\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;127",,terminal_output
|
| 13 |
+
12,41878,"TERMINAL",0,0,"squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14",,terminal_command
|
| 14 |
+
13,41936,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14;a8707c05-ae9b-4a50-91c9-fa9c06501dad]633;C[?25h",,terminal_output
|
| 15 |
+
14,41937,"TERMINAL",0,0," JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 26649778 gpu_p test_kto muhammad R 15:15:33 1 supergpu14\r\n 26644304 gpu_p old_gpt helena.f R 14:08:26 1 supergpu14\r\n",,terminal_output
|
| 16 |
+
15,44976,"TERMINAL",0,0,"salloc",,terminal_focus
|
| 17 |
+
16,50016,"TERMINAL",0,0,"bash",,terminal_focus
|
| 18 |
+
17,58348,"TERMINAL",0,0,"",,terminal_focus
|
| 19 |
+
18,58857,"TERMINAL",0,0,"",,terminal_focus
|
| 20 |
+
19,60096,"TERMINAL",0,0,"bash",,terminal_focus
|
| 21 |
+
20,60736,"TERMINAL",0,0,"salloc",,terminal_focus
|
| 22 |
+
21,63016,"TERMINAL",0,0,"^Csalloc: Job allocation 26666312 has been revoked.\r\nsalloc: Job aborted due to signal\r\n",,terminal_output
|
| 23 |
+
22,67417,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv70 --cpus-per-task=1 --ntasks-per-node=16",,terminal_command
|
| 24 |
+
23,67496,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv70 --cpus-per-task=1 --ntasks-per-node=16;d83d794d-068d-45a7-86c8-da2446d84194]633;Csalloc: Pending job allocation 26666314\r\nsalloc: job 26666314 queued and waiting for resources\r\n",,terminal_output
|
| 25 |
+
24,78434,"TERMINAL",0,0,"^Csalloc: Job allocation 26666314 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;1]633;P;Cwd=/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h",,terminal_output
|
| 26 |
+
25,88537,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=1 --ntasks-per-node=1",,terminal_command
|
| 27 |
+
26,88615,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=1 --ntasks-per-node=1;d83d794d-068d-45a7-86c8-da2446d84194]633;Csalloc: Granted job allocation 26666315\r\n",,terminal_output
|
| 28 |
+
27,88708,"TERMINAL",0,0,"salloc: Nodes gpusrv[69-70] are ready for job\r\n",,terminal_output
|
| 29 |
+
28,89048,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
|
| 30 |
+
29,97467,"TERMINAL",0,0,"\r(reverse-i-search)`': [K",,terminal_output
|
| 31 |
+
30,97592,"TERMINAL",0,0,"s': python -m unittest tests.te[7ms[27mt_checkpointer -v",,terminal_output
|
| 32 |
+
31,97669,"TERMINAL",0,0,"[?25l[58;52H[0mt[58;52H[58;51H[7;39;49ms[58;51H[0m\r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[Co': . ""/ictstr01/home/aih/franz.srambical/.cur[7mso[27mr-server/bin/5b19bac7a947f54e4caa3eb7e4c5fbf832389850/out/vs/workbench/contrib/terminal/common/scripts/shellIntegration-bash.sh""[A[A[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[?25h",,terminal_output
|
| 33 |
+
32,97736,"TERMINAL",0,0,"\r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[24Pu': [7msou[27mrce .venv/bin/activate\r\n\r[K\r\n\r[K[A[A[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[1@r': [7msour[27m",,terminal_output
|
| 34 |
+
33,97908,"TERMINAL",0,0,"[?25l[56;28H[7;39;49mo[56;28H[56;27H[7;39;49ms[56;27H[0m[1@c': [7msourc[27m[?25h[1@e': [7msource[27m",,terminal_output
|
| 35 |
+
34,98276,"TERMINAL",0,0,"[?25l[56;29H\r[6@[franz.srambical@gpusrv69 jafar]$ source\r\n[?2004l\r]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h(jafar) [franz.srambical@gpusrv69 jafar]$ [?25h",,terminal_output
|
| 36 |
+
35,98428,"TERMINAL",0,0,"\r(reverse-i-search)`': [K",,terminal_output
|
| 37 |
+
36,99288,"TERMINAL",0,0,"u': so[7mu[27mrce .venv/bin/activaten': python -m [7mun[27mittest tests.test_checkpointer -v",,terminal_output
|
| 38 |
+
37,99407,"TERMINAL",0,0,"[?25l[57;36H[7;39;49mn[57;36H[57;35H[7;39;49mu[57;35H[0m[1@i': python -m [7muni[27m[?25h",,terminal_output
|
| 39 |
+
38,99596,"TERMINAL",0,0,"[1@t': python -m [7munit[27m",,terminal_output
|
| 40 |
+
39,99728,"TERMINAL",0,0,"[?25l[57;37H[7;39;49mu[57;37H[0m[1@t': python -m [7munitt[27m[?25h",,terminal_output
|
| 41 |
+
40,100124,"TERMINAL",0,0,"\r[Cjafar) [franz.srambical@gpusrv69 jafar]$ python -m unittest tests.test_checkpointer -v[A[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C\r\n\r[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
| 42 |
+
41,100566,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
|
| 43 |
+
42,132885,"tests/test_checkpointer.py",0,0,"",python,tab
|
| 44 |
+
43,166211,"TERMINAL",0,0,"test_distributed_checkpointing (tests.test_checkpointer.DistributedCheckpointerTest) ... ",,terminal_output
|
| 45 |
+
44,169265,"TERMINAL",0,0,"bash",,terminal_focus
|
| 46 |
+
45,472087,"TERMINAL",0,0,"2025-07-04 09:09:39.070277: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: DEADLINE_EXCEEDED: Deadline Exceeded\r\n\r\nRPC: /tensorflow.CoordinationService/RegisterTask\r\n",,terminal_output
|
| 47 |
+
46,472174,"TERMINAL",0,0,"Aborted (core dumped)\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h(jafar) [franz.srambical@gpusrv69 jafar]$ ",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-37584002-fec7-4d34-873a-4137a623e46d1757936313205-2025_09_15-13.38.38.949/source.csv
ADDED
|
@@ -0,0 +1,154 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"input_pipeline/download/huggingface/download_openai_array_records.sh",0,0,"#!/bin/bash\n\n# Download and extract array records from Hugging Face\n# \n# This script performs a two-step process:\n# 1. Downloads compressed array records from a Hugging Face dataset repository\n# 2. Extracts the compressed tar files in parallel for better performance\n#\n# Usage:\n# ./download_openai_array_records.sh [hf_download_dir] [final_dataset_dir]\n#\n# Arguments:\n# hf_download_dir - Directory to store compressed downloads (default: data/minecraft_arrayrecords_compressed)\n# final_dataset_dir - Directory for extracted array records (default: data/minecraft_arrayrecords)\n\n# Set default directories if not provided as arguments\nhf_download_dir=""${1:-data/minecraft_arrayrecords_compressed}"" \nfinal_dataset_dir=""${2:-data/minecraft_arrayrecords}"" \n\nmkdir -p $hf_download_dir\nmkdir -p $final_dataset_dir\n\n# Step 1: Download compressed dataset from Hugging Face\necho ""Starting download from Hugging Face...""\nrepo_id=p-doom/open_ai_minecraft_arrayrecords_chunked\nstart_time_hf_download=$(date +%s)\n\nHF_HUB_ENABLE_HF_TRANSFER=1 HF_HUB_DISABLE_SYMLINKS=1 \\nhuggingface-cli download --repo-type dataset $repo_id --local-dir $hf_download_dir\n\nend_time_hf_download=$(date +%s)\necho ""Download completed. Time taken: $((end_time_hf_download - start_time_hf_download)) seconds""\n\n# Step 2: Extract compressed array records in parallel\necho ""Starting parallel extraction of tar files...""\nnum_workers=64 # Number of parallel extraction processes\nstart_time_uncompress=$(date +%s)\n\n# Find all shard tar files and extract them in parallel:\nxargs -0 -P $num_workers -I {} bash -c 'echo ""Extracting {}""; tar -xf ""{}"" -C ""'$final_dataset_dir'""'\n\nend_time_uncompress=$(date +%s)\n\n# Display timing summary\necho ""================================""\necho ""Extraction completed successfully!""\necho ""Uncompress time: $((end_time_uncompress - start_time_uncompress)) seconds""\necho ""Download time: $((end_time_hf_download - start_time_hf_download)) seconds""\necho ""Total time: $((end_time_uncompress - start_time_hf_download)) seconds""\necho ""Final dataset location: $final_dataset_dir""\n",shellscript,tab
|
| 3 |
+
2,42,"input_pipeline/download/huggingface/download_openai_array_records.sh",1018,0,"",shellscript,selection_command
|
| 4 |
+
3,162,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:38:38 PM [info] Activating crowd-code\n1:38:38 PM [info] Recording started\n1:38:38 PM [info] Initializing git provider using file system watchers...\n1:38:39 PM [info] Git repository found\n1:38:39 PM [info] Git provider initialized successfully\n1:38:39 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 5 |
+
4,164,"extension-output-pdoom-org.crowd-code-#1-crowd-code",40,0,"",Log,selection_command
|
| 6 |
+
5,368,"extension-output-pdoom-org.crowd-code-#1-crowd-code",76,0,"",Log,selection_command
|
| 7 |
+
6,374,"extension-output-pdoom-org.crowd-code-#1-crowd-code",40,0,"",Log,selection_command
|
| 8 |
+
7,1145,"TERMINAL",0,0,"",,terminal_focus
|
| 9 |
+
8,1688,"input_pipeline/download/huggingface/download_openai_array_records.sh",0,0,"",shellscript,tab
|
| 10 |
+
9,1784,"input_pipeline/download/huggingface/download_openai_array_records.sh",1019,0,"",shellscript,selection_command
|
| 11 |
+
10,1961,"TERMINAL",0,0,"source /home/franz.srambical/jafar/.venv/bin/activate",,terminal_command
|
| 12 |
+
11,5709,"TERMINAL",0,0,"",,terminal_command
|
| 13 |
+
12,12528,"TERMINAL",0,0,"",,terminal_command
|
| 14 |
+
13,25562,"generate_arrayrecord_dataset.py",0,0,"#!/usr/bin/env python3\n""""""\nGenerate ArrayRecord dataset compatible with train_tokenizer.py\n\nThis script creates synthetic video data and saves it in ArrayRecord format\nthat can be used by the tokenizer training script.\n""""""\n\nimport os\nimport pickle\nimport numpy as np\nimport grain\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport argparse\nimport multiprocessing as mp\nfrom functools import partial\nimport time\n\n\ndef generate_synthetic_video(\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3\n) -> np.ndarray:\n """"""\n Generate synthetic video data with random frames for training.\n \n Args:\n seq_len: Number of frames in the video sequence\n height: Height of each frame\n width: Width of each frame\n channels: Number of color channels\n \n Returns:\n Video array of shape (seq_len, height, width, channels)\n """"""\n video = np.random.rand(seq_len, height, width, channels).astype(np.float32)\n \n return video\n\n\ndef create_single_arrayrecord_file(\n file_info: tuple,\n output_dir: str,\n seq_len: int,\n height: int,\n width: int,\n channels: int,\n records_per_file: int,\n seed: int\n) -> tuple:\n """"""\n Create a single ArrayRecord file with synthetic video data.\n \n Args:\n file_info: Tuple of (file_idx, start_idx, end_idx)\n output_dir: Directory to save ArrayRecord files\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n \n Returns:\n Tuple of (file_path, num_videos_created, success)\n """"""\n file_idx, start_idx, end_idx = file_info\n videos_in_file = end_idx - start_idx\n \n # Set seed for this process (add file_idx to make each file different)\n np.random.seed(seed + file_idx)\n \n file_path = os.path.join(output_dir, f""videos_{file_idx:04d}.array_record"")\n \n try:\n writer = ArrayRecordWriter(file_path, ""group_size:1"")\n \n for video_idx in range(videos_in_file):\n video = generate_synthetic_video(seq_len, height, width, channels)\n \n # Convert to uint8 format as expected by the dataloader\n video_uint8 = (video * 255).astype(np.uint8)\n \n # Create record in the format expected by ProcessEpisodeAndSlice\n record = {\n ""raw_video"": video_uint8.tobytes(),\n ""sequence_length"": seq_len\n }\n \n writer.write(pickle.dumps(record))\n \n writer.close()\n return (file_path, videos_in_file, True)\n \n except Exception as e:\n print(f""Error creating file {file_path}: {e}"")\n return (file_path, 0, False)\n\n\ndef create_arrayrecord_dataset(\n output_dir: str,\n num_videos: int = 1000,\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3,\n records_per_file: int = 100,\n seed: int = 42,\n num_processes: int | None = None\n):\n """"""\n Create ArrayRecord dataset with synthetic video data using multiprocessing.\n \n Args:\n output_dir: Directory to save ArrayRecord files\n num_videos: Total number of videos to generate\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n num_processes: Number of processes to use (None for auto-detect)\n """"""\n os.makedirs(output_dir, exist_ok=True)\n \n num_files = (num_videos + records_per_file - 1) // records_per_file\n \n print(f""Generating {num_videos} 
videos across {num_files} ArrayRecord files..."")\n print(f""Each file will contain up to {records_per_file} videos"")\n print(f""Video dimensions: {seq_len} frames × {height}×{width}×{channels}"")\n \n # Prepare file information for each worker\n file_infos = []\n for file_idx in range(num_files):\n start_idx = file_idx * records_per_file\n end_idx = min((file_idx + 1) * records_per_file, num_videos)\n file_infos.append((file_idx, start_idx, end_idx))\n \n # Set number of processes (use CPU count if not specified)\n if num_processes is None:\n num_processes = min(mp.cpu_count(), num_files)\n \n print(f""Using {num_processes} processes for parallel generation..."")\n \n start_time = time.time()\n \n # Create partial function with fixed arguments\n worker_func = partial(\n create_single_arrayrecord_file,\n output_dir=output_dir,\n seq_len=seq_len,\n height=height,\n width=width,\n channels=channels,\n records_per_file=records_per_file,\n seed=seed\n )\n \n # Use multiprocessing to create files in parallel\n with mp.Pool(processes=num_processes) as pool:\n results = pool.map(worker_func, file_infos)\n \n end_time = time.time()\n \n # Process results\n total_records = 0\n successful_files = 0\n \n for file_path, num_videos_created, success in results:\n if success:\n print(f""✓ Created {file_path} with {num_videos_created} videos"")\n total_records += num_videos_created\n successful_files += 1\n else:\n print(f""✗ Failed to create {file_path}"")\n \n print(f""\nDataset generation complete!"")\n print(f""Total videos generated: {total_records}"")\n print(f""Successful files: {successful_files}/{num_files}"")\n print(f""Files created in: {output_dir}"")\n print(f""Generation time: {end_time - start_time:.2f} seconds"")\n\n\ndef verify_dataset(output_dir: str, num_samples: int = 5):\n """"""\n Verify the generated dataset using Grain's ArrayRecordDataSource.\n \n Args:\n output_dir: Directory containing ArrayRecord files\n num_samples: Number of samples to verify\n """"""\n print(f""\nVerifying dataset in {output_dir}..."")\n \n # Find all ArrayRecord files\n array_record_files = [\n os.path.join(output_dir, f) \n for f in os.listdir(output_dir) \n if f.endswith('.array_record')\n ]\n \n if not array_record_files:\n print(""No ArrayRecord files found!"")\n return\n \n print(f""Found {len(array_record_files)} ArrayRecord files"")\n \n # Use Grain's ArrayRecordDataSource as shown in the documentation\n try:\n data_source = grain.sources.ArrayRecordDataSource(array_record_files[0])\n print(f""Number of records in first file: {len(data_source)}"")\n \n # Load and verify a few samples\n for i in range(min(num_samples, len(data_source))):\n record_bytes = data_source[i]\n record = pickle.loads(record_bytes)\n \n # Reconstruct video from raw_video bytes\n video_shape = (record[""sequence_length""], 90, 160, 3) # Hardcoded for now\n video = np.frombuffer(record[""raw_video""], dtype=np.uint8).reshape(video_shape)\n \n print(f"" Record {i}: video shape = {video.shape}, dtype = {video.dtype}"")\n print(f"" Value range: [{video.min()}, {video.max()}]"")\n print(f"" Mean: {video.mean():.1f}"")\n print(f"" Sequence length: {record['sequence_length']}"")\n \n except Exception as e:\n print(f""Error reading ArrayRecord file: {e}"")\n print(""This might indicate a file format issue."")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=""Generate ArrayRecord dataset for tokenizer training"")\n parser.add_argument(""--output_dir"", type=str, default=""data_arrayrecord/dummy"",\n help=""Output 
directory for ArrayRecord files"")\n parser.add_argument(""--num_videos"", type=int, default=1000,\n help=""Total number of videos to generate"")\n parser.add_argument(""--seq_len"", type=int, default=16,\n help=""Number of frames per video"")\n parser.add_argument(""--height"", type=int, default=90,\n help=""Frame height"")\n parser.add_argument(""--width"", type=int, default=160,\n help=""Frame width"")\n parser.add_argument(""--channels"", type=int, default=3,\n help=""Number of color channels"")\n parser.add_argument(""--records_per_file"", type=int, default=100,\n help=""Number of records per ArrayRecord file"")\n parser.add_argument(""--seed"", type=int, default=42,\n help=""Random seed for reproducibility"")\n parser.add_argument(""--num_processes"", type=int, default=None,\n help=""Number of processes to use (default: auto-detect)"")\n parser.add_argument(""--verify"", action=""store_true"",\n help=""Verify the generated dataset"")\n \n args = parser.parse_args()\n \n create_arrayrecord_dataset(\n output_dir=args.output_dir,\n num_videos=args.num_videos,\n seq_len=args.seq_len,\n height=args.height,\n width=args.width,\n channels=args.channels,\n records_per_file=args.records_per_file,\n seed=args.seed,\n num_processes=args.num_processes\n )\n \n # Verify if requested\n if args.verify:\n verify_dataset(args.output_dir)\n \nif __name__ == ""__main__"":\n main()\n",python,tab
|
| 15 |
+
14,36568,"input_pipeline/download/huggingface/download_openai_array_records.sh",0,0,"",shellscript,tab
|
| 16 |
+
15,55363,"README.md",0,0,"<h1 align=""center"">🧞♀️ Jasmine: A simple, performant and scalable JAX-based world modeling codebase 🧞♀️</h1>\n\n<p align=""center"">\n <a href= ""https://github.com/FLAIROx/jafar/blob/main/LICENSE"">\n <img src=""https://img.shields.io/badge/license-Apache2.0-blue.svg"" /></a>\n <a href= ""https://github.com/psf/black"">\n <img src=""https://img.shields.io/badge/code%20style-black-000000.svg"" /></a>\n</p>\n\nJasmine is a production-ready JAX-based world modeling codebase. It currently implements the high-level architecture of [Genie: Generative Interactive Environments](https://arxiv.org/abs/2402.15391) (Bruce et al., 2024) with [MaskGIT](https://arxiv.org/abs/2202.04200) (Chang et al., 2022), as well as an autoregressive (causal) baseline. A diffusion baseline is coming soon.\n\nJasmine scales from single hosts to hundreds of xPUs thanks to XLA and strives to be an easily hackable, batteries-included foundation for world modeling research.\n\n<h2 name=""overview"" id=""overview"">Overview</h2>\n\n- Asynchronous & distributed checkpointing thanks to [orbax.checkpoint](https://github.com/google/orbax)\n - Jasmine also supports mixing and matching hardware topologies (e.g. train on four nodes, load the checkpoint on a single node)\n- Optimized dataloading thanks to [Grain](https://github.com/google/grain)\n - Dataloading scales with the number of processes (i.e. nodes/xPUs)\n- Checkpointing of model weights, optimizer and dataloader states\n- Full reproducibility with **identical** training curves (thanks to seeded dataloading and training, and [JAX' approach to pseudo random numbers](https://docs.jax.dev/en/latest/random-numbers.html))\n- Automatic checkpoint deletion/retention according to specified retention policy thanks to `orbax.checkpoint.CheckpointManager`\n- Mixed precision training using `bfloat16`\n - `int8` training is on the roadmap via [aqt](https://github.com/google/aqt)\n- FlashAttention thanks to [cuDNN SDPA](https://github.com/jax-ml/jax/blob/a155c5a9997924170e0067d552351a9833c12c11/jax/_src/cudnn/fused_attention_stablehlo.py#L842)\n- Frame-level KV cache resets for accelerated spatiotemporal attention in causal baseline (still in PR)\n- Activation checkpointing (even onto host memory if desired)\n- DDP (changing to FSDP requires changing **a single line of code**)\n- WSD learning rate schedule\n - No need to retrain from scratch if you want to train for longer\n- Index-shuffling during dataloading\n- Google-native stack\n - https://github.com/google/orbax for checkpointing\n - https://github.com/google/grain for dataloading\n - https://github.com/google-deepmind/dm_pix for image manipulation\n - https://github.com/google/array_record as the data format\n- Easy model inspection thanks to [treescope](https://github.com/google-deepmind/treescope)\n- Modularized training script for easy inspection using notebooks ([demo notebook](https://colab.research.google.com/drive/1zHkciFIZxXloJgue9F5LtFlA0m00rJIf?usp=sharing))\n- Easy model surgery thanks to the new [flax.nnx](https://flax.readthedocs.io/en/latest/migrating/linen_to_nnx.html) API\n- [Shape suffixes](https://medium.com/@NoamShazeer/shape-suffixes-good-coding-style-f836e72e24fd) throughout the repository\n\n<h2 name=""start"" id=""start"">Setup 🧗</h2>\n\nJasmine requires `python 3.10`, `jax 0.6.2`, and `flax 0.10.7`. 
To install the requirements, run:\n\n```bash\npip install -r requirements.txt\npre-commit install\n```\n\n---\n\n<h2 name=""dataset"" id=""dataset"">Dataset 📂</h2>\n\nYou can either download our preprocessed dataset from [Hugging Face](https://huggingface.co/datasets/p-doom/open_ai_minecraft_arrayrecords_chunked) or preprocess [OpenAI's VPT dataset](https://github.com/openai/Video-Pre-Training) manually.\n\n### Option 1: Use Preprocessed Dataset (Recommended)\n\nThe easiest way to get started is to download our preprocessed dataset from Hugging Face. This script will handle downloading and extracting it:\n\n```bash\nbash input_pipeline/download/huggingface/download_openai_array_records.sh\n```\n\n---\n\n### Option 2: Manual Download & Preprocessing of OpenAI's VPT Dataset\n\nIf you prefer to use the raw VPT dataset from OpenAI and preprocess it yourself, follow these steps:\n\n1. **Download index files:**\n This will download the initial index file:\n\n ```bash\n bash input_pipeline/download/openai/download_index_files.sh\n ```\n\n2. **Download from all index files:**\n This may take a long time depending on your bandwidth:\n\n ```bash\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_7xx_Apr_6.json\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_8xx_Jun_29.json\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_9xx_Jun_29.json\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_10xx_Jun_29.json\n ```\n\n3. **Preprocess videos into ArrayRecords:**\n For efficient distributed training, convert the raw videos into the arrayrecord format (make sure to have [ffmpeg](https://github.com/FFmpeg/FFmpeg) installed on your machine):\n\n ```bash\n python input_pipeline/preprocess/video_to_array_records.py\n ```\n\n> **Note:** This is a large dataset and may take considerable time and storage to download and process.\n\n\n<h2 name=""train"" id=""train"">Quick Start 🚀 </h2>\n\nGenie has three components: a [video tokenizer](models/tokenizer.py), a [latent action model](models/lam.py), and a [dynamics model](models/dynamics.py). Each of these components are trained separately, however, the dynamics model requires a pre-trained video tokenizer (and latent action model).\n\nTo train the video tokenizer, run:\n\n```bash\npython train_tokenizer.py --ckpt_dir <path>\n```\n\nTo train the latent action model, run:\n\n```bash\npython train_lam.py --ckpt_dir <path>\n```\n\nOnce the tokenizer and LAM are trained, the dynamics model can be trained with:\n\n```bash\npython train_dynamics.py --tokenizer_checkpoint <path> --lam_checkpoint <path>\n```\n\nLogging with `wandb` is supported. 
To enable logging, set the `WANDB_API_KEY` environment variable or run:\n\n```bash\nwandb login\n```\n\nTraining can then be logged by setting the `--log` flag:\n\n```bash\npython train_tokenizer.py --log --entity <wandb-entity> --project <wandb-project>\n```\n\n<h2 name=""cite"" id=""cite"">Citing 📜 </h2>\n\nJasmine was built by [Mihir Mahajan](https://maharajamihir.github.io/), [Alfred Nguyen](https://avocadoali.github.io/) and [Franz Srambical](https://srambical.fr/), but started as a fork of [Jafar](https://github.com/flairox/jafar), built by [Matthew Jackson](https://matthewtjackson.com) and [Timon Willi](https://www.timonwilli.com).\n\nIf you use Jasmine in your work, please cite us, Jafar, and the original Genie paper as follows:\n\n```\n@article{\n mahajan2025jasmine,\n title={Jasmine: A simple, performant and scalable JAX-based world modeling codebase},\n author={Mihir Mahajan and Alfred Nguyen and Franz Srambical and Stefan Bauer},\n journal = {p(doom) blog},\n year={2025},\n url={https://pdoom.org/jasmine.html},\n note = {https://pdoom.org/blog.html}\n}\n```\n```\n@inproceedings{\n willi2024jafar,\n title={Jafar: An Open-Source Genie Reimplemention in Jax},\n author={Timon Willi and Matthew Thomas Jackson and Jakob Nicolaus Foerster},\n booktitle={First Workshop on Controllable Video Generation @ ICML 2024},\n year={2024},\n url={https://openreview.net/forum?id=ZZGaQHs9Jb}\n}\n```\n```\n@inproceedings{\n bruce2024genie,\n title={Genie: Generative Interactive Environments},\n author={Jake Bruce and Michael D Dennis and Ashley Edwards and Jack Parker-Holder and Yuge Shi and Edward Hughes and Matthew Lai and Aditi Mavalankar and Richie Steigerwald and Chris Apps and Yusuf Aytar and Sarah Maria Elisabeth Bechtle and Feryal Behbahani and Stephanie C.Y. Chan and Nicolas Heess and Lucy Gonzalez and Simon Osindero and Sherjil Ozair and Scott Reed and Jingwei Zhang and Konrad Zolna and Jeff Clune and Nando de Freitas and Satinder Singh and Tim Rockt{\""a}schel},\n booktitle={Forty-first International Conference on Machine Learning},\n year={2024},\n url={https://openreview.net/forum?id=bJbSbJskOS}\n}\n```\n",markdown,tab
|
| 17 |
+
16,55406,"README.md",3938,0,"",markdown,selection_command
|
| 18 |
+
17,73858,"README.md",3783,0,"",markdown,selection_mouse
|
| 19 |
+
18,77177,".gitignore",0,0,"*.pyc\n*.npy\n*.png\n*.gif\n\nwandb_key\ncheckpoints/\nwandb/\n__pycache__/\n",ignore,tab
|
| 20 |
+
19,78799,".gitignore",68,0,"",ignore,selection_command
|
| 21 |
+
20,79159,".gitignore",68,0,"\n",ignore,content
|
| 22 |
+
21,79699,".gitignore",68,1,"",ignore,content
|
| 23 |
+
22,80000,".gitignore",68,0,"e",ignore,content
|
| 24 |
+
23,80158,".gitignore",69,0,"x",ignore,content
|
| 25 |
+
24,80159,".gitignore",70,0,"",ignore,selection_keyboard
|
| 26 |
+
25,80241,".gitignore",70,0,"p",ignore,content
|
| 27 |
+
26,80242,".gitignore",71,0,"",ignore,selection_keyboard
|
| 28 |
+
27,80384,".gitignore",71,0,"e",ignore,content
|
| 29 |
+
28,80385,".gitignore",72,0,"",ignore,selection_keyboard
|
| 30 |
+
29,80429,".gitignore",72,0,"r",ignore,content
|
| 31 |
+
30,80429,".gitignore",73,0,"",ignore,selection_keyboard
|
| 32 |
+
31,80466,".gitignore",73,0,"i",ignore,content
|
| 33 |
+
32,80467,".gitignore",74,0,"",ignore,selection_keyboard
|
| 34 |
+
33,80556,".gitignore",74,0,"m",ignore,content
|
| 35 |
+
34,80556,".gitignore",75,0,"",ignore,selection_keyboard
|
| 36 |
+
35,80613,".gitignore",75,0,"e",ignore,content
|
| 37 |
+
36,80614,".gitignore",76,0,"",ignore,selection_keyboard
|
| 38 |
+
37,80754,".gitignore",76,0,"n",ignore,content
|
| 39 |
+
38,80754,".gitignore",77,0,"",ignore,selection_keyboard
|
| 40 |
+
39,80833,".gitignore",77,0,"t",ignore,content
|
| 41 |
+
40,80834,".gitignore",78,0,"",ignore,selection_keyboard
|
| 42 |
+
41,80844,".gitignore",78,0,"s",ignore,content
|
| 43 |
+
42,80844,".gitignore",79,0,"",ignore,selection_keyboard
|
| 44 |
+
43,81008,".gitignore",79,0,"/",ignore,content
|
| 45 |
+
44,81009,".gitignore",80,0,"",ignore,selection_keyboard
|
| 46 |
+
45,81657,".gitignore",79,0,"",ignore,selection_command
|
| 47 |
+
46,82815,".gitignore",80,0,"\n",ignore,content
|
| 48 |
+
47,83395,".gitignore",81,0,"t",ignore,content
|
| 49 |
+
48,83395,".gitignore",82,0,"",ignore,selection_keyboard
|
| 50 |
+
49,83456,".gitignore",82,0,"e",ignore,content
|
| 51 |
+
50,83456,".gitignore",83,0,"",ignore,selection_keyboard
|
| 52 |
+
51,83540,".gitignore",83,0,"n",ignore,content
|
| 53 |
+
52,83540,".gitignore",84,0,"",ignore,selection_keyboard
|
| 54 |
+
53,83639,".gitignore",84,0,"s",ignore,content
|
| 55 |
+
54,83639,".gitignore",85,0,"",ignore,selection_keyboard
|
| 56 |
+
55,83768,".gitignore",85,0,"o",ignore,content
|
| 57 |
+
56,83769,".gitignore",86,0,"",ignore,selection_keyboard
|
| 58 |
+
57,83769,".gitignore",86,0,"r",ignore,content
|
| 59 |
+
58,83769,".gitignore",87,0,"",ignore,selection_keyboard
|
| 60 |
+
59,84027,".gitignore",87,0,"b",ignore,content
|
| 61 |
+
60,84027,".gitignore",88,0,"",ignore,selection_keyboard
|
| 62 |
+
61,84468,".gitignore",88,0,"o",ignore,content
|
| 63 |
+
62,84469,".gitignore",89,0,"",ignore,selection_keyboard
|
| 64 |
+
63,84470,".gitignore",89,0,"a",ignore,content
|
| 65 |
+
64,84470,".gitignore",90,0,"",ignore,selection_keyboard
|
| 66 |
+
65,84470,".gitignore",90,0,"r",ignore,content
|
| 67 |
+
66,84471,".gitignore",91,0,"",ignore,selection_keyboard
|
| 68 |
+
67,84491,".gitignore",91,0,"d",ignore,content
|
| 69 |
+
68,84492,".gitignore",92,0,"",ignore,selection_keyboard
|
| 70 |
+
69,85408,".gitignore",92,0,"/",ignore,content
|
| 71 |
+
70,85408,".gitignore",93,0,"",ignore,selection_keyboard
|
| 72 |
+
71,85643,".gitignore",92,0,"",ignore,selection_command
|
| 73 |
+
72,96459,"test/test_jax.py",0,0,"# import jax\n# with jax.profiler.trace(""tensorboard""):\n # key = jax.random.key(0)\n # x = jax.random.normal(key, (1024, 1024))\n # y = x @ x\n # y.block_until_ready()\n\nimport datetime\nimport jax.numpy as jnp\nimport jax\n\nMATRIX_DIM = 32768\nSTEPS = 10\n\nA = jnp.ones((MATRIX_DIM, MATRIX_DIM), dtype=jnp.float32)\nB = jnp.ones((MATRIX_DIM, MATRIX_DIM), dtype=jnp.float32)\n\nnum_bytes = A.size * 4\ntotal_num_bytes_crossing_to_hbm = num_bytes * 3\n\ntotal_num_flops = 2 * MATRIX_DIM * MATRIX_DIM**2\n\ndef matmul(A, B):\n return A @ B\n\nmatmul(A, B) # warmup\n\nwith jax.profiler.trace(""tensorboard""):\n start_time = datetime.datetime.now()\n for i in range(STEPS):\n C = A @ B\n C.block_until_ready()\n end_time = datetime.datetime.now()\n\naverage_time_per_step = (end_time - start_time).total_seconds() / STEPS\n\nprint(f""{average_time_per_step}, teraflops per second: {total_num_flops / average_time_per_step / 1e12}, gigabytes per second: {total_num_bytes_crossing_to_hbm / average_time_per_step / 1e9}"")\n\n",python,tab
|
| 74 |
+
73,97507,"test/test_torch.py",0,0,"import datetime\nimport torch\n\ntorch.backends.cuda.matmul.allow_tf32 = True\n\nMATRIX_DIM = 32768\nSTEPS = 10\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nA = torch.ones((MATRIX_DIM, MATRIX_DIM), device=device, dtype=torch.float32)\nB = torch.ones((MATRIX_DIM, MATRIX_DIM), device=device, dtype=torch.float32)\n\nnum_bytes = A.numel() * A.element_size()\ntotal_num_bytes_crossing_to_hbm = num_bytes * 3\n\ntotal_num_flops = 2 * MATRIX_DIM * MATRIX_DIM**2\n\ndef matmul(X, Y):\n return X @ Y\n\n# warmup\nwith torch.no_grad():\n _ = matmul(A, B)\n if device.type == 'cuda':\n torch.cuda.synchronize()\n\nstart_time = datetime.datetime.now()\nwith torch.no_grad():\n for i in range(STEPS):\n C = A @ B\n if device.type == 'cuda':\n torch.cuda.synchronize()\nend_time = datetime.datetime.now()\n\naverage_time_per_step = (end_time - start_time).total_seconds() / STEPS\n\nprint(f""{average_time_per_step}, teraflops per second: {total_num_flops / average_time_per_step / 1e12}, gigabytes per second: {total_num_bytes_crossing_to_hbm / average_time_per_step / 1e9}"")\n",python,tab
|
| 75 |
+
74,99140,"test/test_torch.py",332,0,"",python,selection_mouse
|
| 76 |
+
75,99847,"test/test_jax.py",0,0,"",python,tab
|
| 77 |
+
76,100710,".gitignore",0,0,"",ignore,tab
|
| 78 |
+
77,101277,".gitignore",93,0,"\n",ignore,content
|
| 79 |
+
78,101496,".gitignore",94,0,"t",ignore,content
|
| 80 |
+
79,101496,".gitignore",95,0,"",ignore,selection_keyboard
|
| 81 |
+
80,101541,".gitignore",95,0,"e",ignore,content
|
| 82 |
+
81,101541,".gitignore",96,0,"",ignore,selection_keyboard
|
| 83 |
+
82,101599,".gitignore",96,0,"s",ignore,content
|
| 84 |
+
83,101599,".gitignore",97,0,"",ignore,selection_keyboard
|
| 85 |
+
84,101692,".gitignore",97,0,"t",ignore,content
|
| 86 |
+
85,101692,".gitignore",98,0,"",ignore,selection_keyboard
|
| 87 |
+
86,101778,".gitignore",98,0,"/",ignore,content
|
| 88 |
+
87,101778,".gitignore",99,0,"",ignore,selection_keyboard
|
| 89 |
+
88,102064,".gitignore",98,0,"",ignore,selection_command
|
| 90 |
+
89,107866,"requirements.txt",0,0,"# core requirements\ndm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.10.7\njax[cuda12]>=0.6.2\noptax>=0.2.3\ntyro>=0.8.5\nwandb>=0.17.4\ngrain>=0.2.10\narray-record>=0.7.2\n\n# data pipeline\ngsutil>=5.35\nffmpeg-python==0.2.0\nhf-transfer==0.1.9\nhuggingface-hub[cli]>=0.34.3\nprocgen>=0.10.7\ntqdm>=4.67.1\n\n# dev\npre-commit>=4.2.0",pip-requirements,tab
|
| 91 |
+
90,107894,"requirements.txt",154,0,"",pip-requirements,selection_command
|
| 92 |
+
91,113045,"requirements.txt",150,0,"",pip-requirements,selection_mouse
|
| 93 |
+
92,113694,".gitignore",0,0,"",ignore,tab
|
| 94 |
+
93,120715,"TERMINAL",0,0,"",,terminal_command
|
| 95 |
+
94,122824,"test.py",0,0,"import jax\nimport jax.numpy as jnp\nimport numpy as np\nimport functools\n\n@jax.jit\ndef matmul(a, b):\n return jnp.matmul(a, b)\n\nMATRIX_SIZE = 1024*12\na = jnp.ones((MATRIX_SIZE, MATRIX_SIZE))\nb = jnp.ones((MATRIX_SIZE, MATRIX_SIZE))\n\ncompiled_matmul = matmul.lower(a, b).compile()\n\ndef print_compiled_memory_stats(compiled_stats):\n """"""Prints a summary of the compiled memory statistics.""""""\n if compiled_stats is None:\n return\n\n def bytes_to_gb(num_bytes):\n return num_bytes / (1024**3)\n\n output_gb = bytes_to_gb(compiled_stats.output_size_in_bytes)\n temp_gb = bytes_to_gb(compiled_stats.temp_size_in_bytes)\n argument_gb = bytes_to_gb(compiled_stats.argument_size_in_bytes)\n alias_gb = bytes_to_gb(compiled_stats.alias_size_in_bytes)\n host_temp_gb = bytes_to_gb(compiled_stats.host_temp_size_in_bytes)\n total_gb = output_gb + temp_gb + argument_gb - alias_gb\n\n print(\n f""Total memory size: {total_gb:.1f} GB, Output size: {output_gb:.1f} GB, Temp size: {temp_gb:.1f} GB, ""\n f""Argument size: {argument_gb:.1f} GB, Host temp size: {host_temp_gb:.1f} GB.""\n )\n\nprint_compiled_memory_stats(compiled_matmul.memory_analysis())",python,tab
|
| 96 |
+
95,139273,"test.py",406,0,"",python,selection_mouse
|
| 97 |
+
96,140052,"test.py",410,0,"",python,selection_command
|
| 98 |
+
97,140278,"test.py",413,0,"",python,selection_command
|
| 99 |
+
98,141132,".gitignore",0,0,"",ignore,tab
|
| 100 |
+
99,141750,".gitignore",99,0,"\n",ignore,content
|
| 101 |
+
100,142062,".gitignore",100,0,"d",ignore,content
|
| 102 |
+
101,142062,".gitignore",101,0,"",ignore,selection_keyboard
|
| 103 |
+
102,142076,".gitignore",101,0,"a",ignore,content
|
| 104 |
+
103,142076,".gitignore",102,0,"",ignore,selection_keyboard
|
| 105 |
+
104,142159,".gitignore",102,0,"t",ignore,content
|
| 106 |
+
105,142159,".gitignore",103,0,"",ignore,selection_keyboard
|
| 107 |
+
106,142241,".gitignore",103,0,"a",ignore,content
|
| 108 |
+
107,142241,".gitignore",104,0,"",ignore,selection_keyboard
|
| 109 |
+
108,142600,".gitignore",104,0,"_",ignore,content
|
| 110 |
+
109,142600,".gitignore",105,0,"",ignore,selection_keyboard
|
| 111 |
+
110,142968,".gitignore",105,0,"a",ignore,content
|
| 112 |
+
111,142968,".gitignore",106,0,"",ignore,selection_keyboard
|
| 113 |
+
112,142977,".gitignore",106,0,"r",ignore,content
|
| 114 |
+
113,142977,".gitignore",107,0,"",ignore,selection_keyboard
|
| 115 |
+
114,143114,".gitignore",107,0,"r",ignore,content
|
| 116 |
+
115,143115,".gitignore",108,0,"",ignore,selection_keyboard
|
| 117 |
+
116,143129,".gitignore",108,0,"a",ignore,content
|
| 118 |
+
117,143129,".gitignore",109,0,"",ignore,selection_keyboard
|
| 119 |
+
118,143218,".gitignore",109,0,"y",ignore,content
|
| 120 |
+
119,143219,".gitignore",110,0,"",ignore,selection_keyboard
|
| 121 |
+
120,143439,".gitignore",110,0,"record/",ignore,content
|
| 122 |
+
121,143716,".gitignore",116,0,"",ignore,selection_command
|
| 123 |
+
122,148838,".gitignore",0,0,"",ignore,tab
|
| 124 |
+
123,150619,"salient_restore_failing_minimal_example.py",0,0,"import os\nimport shutil\n\nimport jax.numpy as jnp\nfrom flax import nnx\nimport orbax.checkpoint as ocp\n\n# The hunch is that parameters like biases in Linear layers are only created if\n# they are used (e.g. `use_bias=True`). When checkpointing a model created with\n# `use_bias=False`, the bias parameter is not saved.\n#\n# Attempting to restore this checkpoint into a model instance created with\n# `use_bias=True` is expected to fail, as the checkpointed state has a\n# different structure (missing the 'bias' leaf in the PyTree) from the target model.\n\nckpt_dir = '/tmp/flax_nnx_bug_ckpt'\nif os.path.exists(ckpt_dir):\n shutil.rmtree(ckpt_dir)\nos.makedirs(ckpt_dir)\n\n\nclass MyModel(nnx.Module):\n """"""A simple model containing a Linear layer.""""""\n\n def __init__(self, use_bias: bool, *, rngs: nnx.Rngs):\n self.linear = nnx.Linear(10, 20, use_bias=use_bias, rngs=rngs)\n\n def __call__(self, x):\n return self.linear(x)\n\n\nrngs = nnx.Rngs(0)\nmodel_no_bias = MyModel(use_bias=False, rngs=rngs)\n\ndummy_input = jnp.ones((1, 10))\nmodel_no_bias(dummy_input)\n\nstate_no_bias, _ = nnx.split(model_no_bias)\nprint('State of model_no_bias:', state_no_bias)\n\n# Save the checkpoint.\ncheckpointer = ocp.StandardCheckpointer()\ncheckpointer.save(ckpt_dir, model_no_bias)\nprint(f""Checkpoint saved to '{ckpt_dir}'.\n"")\n\n\n# 3. Instantiate a new model with use_bias=True.\nprint('Step 2: Creating a new model with use_bias=True.')\nmodel_with_bias = MyModel(use_bias=True, rngs=rngs)\n\n# Initialize parameters. This will create the 'bias' parameter.\nmodel_with_bias(dummy_input)\n\n# Display the state. Note the presence of 'bias'.\nstate_with_bias, _ = nnx.split(model_with_bias)\nprint('State of model_with_bias (before restore):', state_with_bias)\nprint('')\n\n\n# 4. Attempt to restore the checkpoint into the new model.\n# This is expected to fail because the checkpoint is missing the 'bias' parameter\n# that exists in 'model_with_bias'.\nprint('Step 3: Attempting to restore the checkpoint.')\ntry:\n checkpoints.restore_checkpoint(model_with_bias, ckpt_dir)\n print('Restore successful. (This was not expected)')\nexcept Exception as e:\n print('Restore failed as expected.')\n print(f'Error: {e}')\nfinally:\n # Clean up the checkpoint directory.\n shutil.rmtree(ckpt_dir)\n",python,tab
|
| 125 |
+
124,155580,"generate_arrayrecord_dataset.py",0,0,"",python,tab
|
| 126 |
+
125,156272,"generate_arrayrecord_dataset.py",266,0,"",python,selection_mouse
|
| 127 |
+
126,156273,"generate_arrayrecord_dataset.py",265,0,"",python,selection_command
|
| 128 |
+
127,158059,"generate_arrayrecord_dataset.py",1033,0,"",python,selection_keyboard
|
| 129 |
+
128,158189,"generate_arrayrecord_dataset.py",1963,0,"",python,selection_keyboard
|
| 130 |
+
129,158339,"generate_arrayrecord_dataset.py",1033,0,"",python,selection_keyboard
|
| 131 |
+
130,158489,"generate_arrayrecord_dataset.py",265,0,"",python,selection_keyboard
|
| 132 |
+
131,158620,"generate_arrayrecord_dataset.py",0,0,"",python,selection_keyboard
|
| 133 |
+
132,159326,"salient_restore_failing_minimal_example.py",0,0,"",python,tab
|
| 134 |
+
133,173104,"salient_restore_failing_minimal_example.py",311,0,"",python,selection_mouse
|
| 135 |
+
134,230096,"salient_restore_failing_minimal_example.py",0,0,"Switched from branch 'main' to 'huggingface-download-in-folder'",python,git_branch_checkout
|
| 136 |
+
135,234046,".gitignore",0,0,"*.pyc\n*.npy\n*.png\n*.gif\n\nwandb_key\ncheckpoints/\nwandb/\n__pycache__/\nexperiments/\ntensorboard/\ntest/\ndata_arrayrecord/",ignore,tab
|
| 137 |
+
136,243891,".gitignore",23,0,"",ignore,selection_mouse
|
| 138 |
+
137,243892,".gitignore",22,0,"",ignore,selection_command
|
| 139 |
+
138,245025,"input_pipeline/download/huggingface/download_openai_array_records.sh",0,0,"#!/bin/bash\n\n# Download and extract array records from Hugging Face\n# \n# This script performs a two-step process:\n# 1. Downloads compressed array records from a Hugging Face dataset repository\n# 2. Extracts the compressed tar files in parallel for better performance\n#\n# Usage:\n# ./download_openai_array_records.sh [hf_download_dir] [final_dataset_dir]\n#\n# Arguments:\n# hf_download_dir - Directory to store compressed downloads (default: data/minecraft_arrayrecords_compressed)\n# final_dataset_dir - Directory for extracted array records (default: data/minecraft_arrayrecords)\n\n# Set default directories if not provided as arguments\nhf_download_dir=""${1:-data/minecraft_arrayrecords_compressed}"" \nfinal_dataset_dir=""${2:-data/minecraft_arrayrecords}"" \n\nmkdir -p $hf_download_dir\nmkdir -p $final_dataset_dir\n\n# Step 1: Download compressed dataset from Hugging Face\necho ""Starting download from Hugging Face...""\nrepo_id=p-doom/open_ai_minecraft_arrayrecords_chunked\nstart_time_hf_download=$(date +%s)\n\nHF_HUB_ENABLE_HF_TRANSFER=1 HF_HUB_DISABLE_SYMLINKS=1 \\nhuggingface-cli download --repo-type dataset $repo_id --local-dir $hf_download_dir\n\nend_time_hf_download=$(date +%s)\necho ""Download completed. Time taken: $((end_time_hf_download - start_time_hf_download)) seconds""\n\n# Step 2: Extract compressed array records in parallel\necho ""Starting parallel extraction of tar files...""\nnum_workers=64 # Number of parallel extraction processes\nstart_time_uncompress=$(date +%s)\n\n# Find all shard tar files and extract them in parallel:\nxargs -0 -P $num_workers -I {} bash -c 'echo ""Extracting {}""; tar -xf ""{}"" -C ""'$final_dataset_dir'""'\n\nend_time_uncompress=$(date +%s)\n\n# Display timing summary\necho ""================================""\necho ""Extraction completed successfully!""\necho ""Uncompress time: $((end_time_uncompress - start_time_uncompress)) seconds""\necho ""Download time: $((end_time_hf_download - start_time_hf_download)) seconds""\necho ""Total time: $((end_time_uncompress - start_time_hf_download)) seconds""\necho ""Final dataset location: $final_dataset_dir""\n",shellscript,tab
|
| 140 |
+
139,517268,"README.md",0,0,"<h1 align=""center"">🧞♀️ Jasmine: A simple, performant and scalable JAX-based world modeling codebase 🧞♀️</h1>\n\n<p align=""center"">\n <a href= ""https://github.com/FLAIROx/jafar/blob/main/LICENSE"">\n <img src=""https://img.shields.io/badge/license-Apache2.0-blue.svg"" /></a>\n <a href= ""https://github.com/psf/black"">\n <img src=""https://img.shields.io/badge/code%20style-black-000000.svg"" /></a>\n</p>\n\nJasmine is a production-ready JAX-based world modeling codebase. It currently implements the high-level architecture of [Genie: Generative Interactive Environments](https://arxiv.org/abs/2402.15391) (Bruce et al., 2024) with [MaskGIT](https://arxiv.org/abs/2202.04200) (Chang et al., 2022), as well as an autoregressive (causal) baseline. A diffusion baseline is coming soon.\n\nJasmine scales from single hosts to hundreds of xPUs thanks to XLA and strives to be an easily hackable, batteries-included foundation for world modeling research.\n\n<h2 name=""overview"" id=""overview"">Overview</h2>\n\n- Asynchronous & distributed checkpointing thanks to [orbax.checkpoint](https://github.com/google/orbax)\n - Jasmine also supports mixing and matching hardware topologies (e.g. train on four nodes, load the checkpoint on a single node)\n- Optimized dataloading thanks to [Grain](https://github.com/google/grain)\n - Dataloading scales with the number of processes (i.e. nodes/xPUs)\n- Checkpointing of model weights, optimizer and dataloader states\n- Full reproducibility with **identical** training curves (thanks to seeded dataloading and training, and [JAX' approach to pseudo random numbers](https://docs.jax.dev/en/latest/random-numbers.html))\n- Automatic checkpoint deletion/retention according to specified retention policy thanks to `orbax.checkpoint.CheckpointManager`\n- Mixed precision training using `bfloat16`\n - `int8` training is on the roadmap via [aqt](https://github.com/google/aqt)\n- FlashAttention thanks to [cuDNN SDPA](https://github.com/jax-ml/jax/blob/a155c5a9997924170e0067d552351a9833c12c11/jax/_src/cudnn/fused_attention_stablehlo.py#L842)\n- Frame-level KV cache resets for accelerated spatiotemporal attention in causal baseline (still in PR)\n- Activation checkpointing (even onto host memory if desired)\n- DDP (changing to FSDP requires changing **a single line of code**)\n- WSD learning rate schedule\n - No need to retrain from scratch if you want to train for longer\n- Index-shuffling during dataloading\n- Google-native stack\n - https://github.com/google/orbax for checkpointing\n - https://github.com/google/grain for dataloading\n - https://github.com/google-deepmind/dm_pix for image manipulation\n - https://github.com/google/array_record as the data format\n- Easy model inspection thanks to [treescope](https://github.com/google-deepmind/treescope)\n- Modularized training script for easy inspection using notebooks ([demo notebook](https://colab.research.google.com/drive/1zHkciFIZxXloJgue9F5LtFlA0m00rJIf?usp=sharing))\n- Easy model surgery thanks to the new [flax.nnx](https://flax.readthedocs.io/en/latest/migrating/linen_to_nnx.html) API\n- [Shape suffixes](https://medium.com/@NoamShazeer/shape-suffixes-good-coding-style-f836e72e24fd) throughout the repository\n\n<h2 name=""start"" id=""start"">Setup 🧗</h2>\n\nJasmine requires `python 3.10`, `jax 0.6.2`, and `flax 0.10.7`. 
To install the requirements, run:\n\n```bash\npip install -r requirements.txt\npre-commit install\n```\n\n---\n\n<h2 name=""dataset"" id=""dataset"">Dataset 📂</h2>\n\nYou can either download our preprocessed dataset from [Hugging Face](https://huggingface.co/datasets/p-doom/open_ai_minecraft_arrayrecords_chunked) or preprocess [OpenAI's VPT dataset](https://github.com/openai/Video-Pre-Training) manually.\n\n### Option 1: Use Preprocessed Dataset (Recommended)\n\nThe easiest way to get started is to download our preprocessed dataset from Hugging Face. This script will handle downloading and extracting it:\n\n```bash\nbash input_pipeline/download/huggingface/download_openai_array_records.sh\n```\n\n---\n\n### Option 2: Manual Download & Preprocessing of OpenAI's VPT Dataset\n\nIf you prefer to use the raw VPT dataset from OpenAI and preprocess it yourself, follow these steps:\n\n1. **Download index files:**\n This will download the initial index file:\n\n ```bash\n bash input_pipeline/download/openai/download_index_files.sh\n ```\n\n2. **Download from all index files:**\n This may take a long time depending on your bandwidth:\n\n ```bash\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_7xx_Apr_6.json\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_8xx_Jun_29.json\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_9xx_Jun_29.json\n python input_pipeline/download/openai/download_videos.py --index_file_path data/open_ai_index_files/all_10xx_Jun_29.json\n ```\n\n3. **Preprocess videos into ArrayRecords:**\n For efficient distributed training, convert the raw videos into the arrayrecord format (make sure to have [ffmpeg](https://github.com/FFmpeg/FFmpeg) installed on your machine):\n\n ```bash\n python input_pipeline/preprocess/video_to_array_records.py\n ```\n\n> **Note:** This is a large dataset and may take considerable time and storage to download and process.\n\n\n<h2 name=""train"" id=""train"">Quick Start 🚀 </h2>\n\nGenie has three components: a [video tokenizer](models/tokenizer.py), a [latent action model](models/lam.py), and a [dynamics model](models/dynamics.py). Each of these components are trained separately, however, the dynamics model requires a pre-trained video tokenizer (and latent action model).\n\nTo train the video tokenizer, run:\n\n```bash\npython train_tokenizer.py --ckpt_dir <path>\n```\n\nTo train the latent action model, run:\n\n```bash\npython train_lam.py --ckpt_dir <path>\n```\n\nOnce the tokenizer and LAM are trained, the dynamics model can be trained with:\n\n```bash\npython train_dynamics.py --tokenizer_checkpoint <path> --lam_checkpoint <path>\n```\n\nLogging with `wandb` is supported. 
To enable logging, set the `WANDB_API_KEY` environment variable or run:\n\n```bash\nwandb login\n```\n\nTraining can then be logged by setting the `--log` flag:\n\n```bash\npython train_tokenizer.py --log --entity <wandb-entity> --project <wandb-project>\n```\n\n<h2 name=""cite"" id=""cite"">Citing 📜 </h2>\n\nJasmine was built by [Mihir Mahajan](https://maharajamihir.github.io/), [Alfred Nguyen](https://avocadoali.github.io/) and [Franz Srambical](https://srambical.fr/), but started as a fork of [Jafar](https://github.com/flairox/jafar), built by [Matthew Jackson](https://matthewtjackson.com) and [Timon Willi](https://www.timonwilli.com).\n\nIf you use Jasmine in your work, please cite us, Jafar, and the original Genie paper as follows:\n\n```\n@article{\n mahajan2025jasmine,\n title={Jasmine: A simple, performant and scalable JAX-based world modeling codebase},\n author={Mihir Mahajan and Alfred Nguyen and Franz Srambical and Stefan Bauer},\n journal = {p(doom) blog},\n year={2025},\n url={https://pdoom.org/jasmine.html},\n note = {https://pdoom.org/blog.html}\n}\n```\n```\n@inproceedings{\n willi2024jafar,\n title={Jafar: An Open-Source Genie Reimplemention in Jax},\n author={Timon Willi and Matthew Thomas Jackson and Jakob Nicolaus Foerster},\n booktitle={First Workshop on Controllable Video Generation @ ICML 2024},\n year={2024},\n url={https://openreview.net/forum?id=ZZGaQHs9Jb}\n}\n```\n```\n@inproceedings{\n bruce2024genie,\n title={Genie: Generative Interactive Environments},\n author={Jake Bruce and Michael D Dennis and Ashley Edwards and Jack Parker-Holder and Yuge Shi and Edward Hughes and Matthew Lai and Aditi Mavalankar and Richie Steigerwald and Chris Apps and Yusuf Aytar and Sarah Maria Elisabeth Bechtle and Feryal Behbahani and Stephanie C.Y. Chan and Nicolas Heess and Lucy Gonzalez and Simon Osindero and Sherjil Ozair and Scott Reed and Jingwei Zhang and Konrad Zolna and Jeff Clune and Nando de Freitas and Satinder Singh and Tim Rockt{\""a}schel},\n booktitle={Forty-first International Conference on Machine Learning},\n year={2024},\n url={https://openreview.net/forum?id=bJbSbJskOS}\n}\n```\n",markdown,tab
|
| 141 |
+
140,605519,"input_pipeline/download/dqn_replay/download_pngs.py",0,0,"",python,tab
|
| 142 |
+
141,665641,"input_pipeline/download/dqn_replay/download_pngs.py",0,0,"",python,tab
|
| 143 |
+
142,717248,"input_pipeline/download/openai/download_videos.py",0,0,"import json\nimport requests\nimport os\nimport tyro\nimport logging\nfrom urllib.parse import urljoin\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom multiprocessing import Pool, cpu_count\nimport time\n\n\n@dataclass\nclass DownloadVideos:\n index_file_path: str = ""data/open_ai_index_files/all_6xx_Jun_29.json""\n num_workers: int = -1 # -1 means use all available cores\n output_dir: str = ""data/minecraft_videos/""\n\n\ndef download_single_file(args):\n """"""Download a single file - designed to be used with multiprocessing""""""\n relpath, url, output_path = args\n\n if os.path.exists(output_path):\n return f""Skipped {relpath} (already exists)""\n\n # No need to create parent directories since we're flattening the structure\n try:\n response = requests.get(url, stream=True, timeout=30)\n if response.status_code == 200:\n file_size = 0\n with open(output_path, ""wb"") as f:\n for chunk in response.iter_content(chunk_size=8192):\n if chunk:\n f.write(chunk)\n file_size += len(chunk)\n\n # Convert to MB for logging\n file_size_mb = file_size / (1024 * 1024)\n return f""Downloaded {relpath} ({file_size_mb:.2f} MB)""\n else:\n return f""Failed to download {relpath}: HTTP {response.status_code}""\n except requests.exceptions.RequestException as e:\n return f""Request failed for {relpath}: {e}""\n except Exception as e:\n return f""Unexpected error downloading {relpath}: {e}""\n\n\ndef flatten_path(relpath):\n """"""Convert nested path to flattened filename with subdirectory as prefix\n e.g. data/6.10/filename.mp4 -> 6.10_filename.mp4\n """"""\n\n parts = relpath.split(""/"")\n\n if len(parts) >= 3:\n subdir = parts[1]\n filename = parts[2]\n return f""{subdir}_{filename}""\n else:\n return relpath.replace(""/"", ""_"")\n\n\ndef download_dataset(index_file_path, output_dir, num_workers=64):\n # Load the index file\n with open(index_file_path, ""r"") as f:\n index_data = json.load(f)\n\n basedir = index_data[""basedir""]\n relpaths = index_data[""relpaths""]\n\n # Filter for mp4 files only and flatten the path structure\n mp4_files = []\n for relpath in relpaths:\n if relpath.endswith("".mp4""):\n url = urljoin(basedir, relpath)\n flattened_filename = flatten_path(relpath)\n output_path = os.path.join(output_dir, flattened_filename)\n mp4_files.append((relpath, url, output_path))\n\n print(f""Found {len(mp4_files)} MP4 files to download"")\n print(f""Using {num_workers} workers for parallel downloads"")\n\n start_time = time.time()\n\n if num_workers > len(mp4_files):\n num_workers = len(mp4_files)\n\n with tqdm(\n total=len(mp4_files), desc=""Overall Download Progress"", unit=""files""\n ) as pbar:\n with Pool(processes=num_workers) as pool:\n results = []\n for result in pool.imap_unordered(\n download_single_file,\n [\n (relpath, url, output_path)\n for relpath, url, output_path in mp4_files\n ],\n ):\n results.append(result)\n pbar.update(1)\n # Print final results summary\n successful_downloads = sum(1 for r in results if ""Downloaded"" in r)\n skipped_files = sum(1 for r in results if ""Skipped"" in r)\n failed_downloads = len(results) - successful_downloads - skipped_files\n\n print(f""\nDownload Summary:"")\n print(f"" Successful downloads: {successful_downloads}"")\n print(f"" Skipped files: {skipped_files}"")\n print(f"" Failed downloads: {failed_downloads}"")\n\n end_time = time.time()\n total_time = end_time - start_time\n print(f""Download completed in {total_time:.2f} 
seconds"")\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(DownloadVideos)\n os.makedirs(args.output_dir, exist_ok=True)\n\n if args.num_workers == -1:\n args.num_workers = cpu_count()\n\n print(f""Index file path: {args.index_file_path}"")\n print(f""Output directory: {args.output_dir}"")\n print(f""Number of workers: {args.num_workers}"")\n\n download_dataset(args.index_file_path, args.output_dir, args.num_workers)\n",python,tab
|
| 144 |
+
143,719618,"input_pipeline/download/openai/download_videos.py",882,0,"",python,selection_keyboard
|
| 145 |
+
144,720063,"input_pipeline/download/openai/download_videos.py",1958,0,"",python,selection_keyboard
|
| 146 |
+
145,720725,"input_pipeline/download/openai/download_videos.py",2969,0,"",python,selection_keyboard
|
| 147 |
+
146,721439,"input_pipeline/download/openai/download_videos.py",4063,0,"",python,selection_keyboard
|
| 148 |
+
147,722157,"input_pipeline/download/openai/download_videos.py",4299,0,"",python,selection_keyboard
|
| 149 |
+
148,723162,"input_pipeline/download/openai/download_videos.py",4221,0,"",python,selection_command
|
| 150 |
+
149,723360,"input_pipeline/download/openai/download_videos.py",4220,0,"",python,selection_command
|
| 151 |
+
150,723566,"input_pipeline/download/openai/download_videos.py",4168,0,"",python,selection_command
|
| 152 |
+
151,729047,"input_pipeline/download/openai/download_videos.py",3992,0,"",python,selection_mouse
|
| 153 |
+
152,730860,"input_pipeline/download/dqn_replay/download_pngs.py",0,0,"",python,tab
|
| 154 |
+
153,745951,"input_pipeline/download/dqn_replay/download_pngs.py",0,0,"",python,tab
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-38901971-8972-4ee9-90a3-96f537861f5b1761393531768-2025_10_25-13.59.11.993/source.csv
ADDED
|
@@ -0,0 +1,137 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"crowd-pilot/serialize_dataset_array_record.py",0,0,"#!/usr/bin/env python3\n""""""\nCSV sessions -> ArrayRecord shards for MaxText Grain pretraining.\n""""""\n\nfrom __future__ import annotations\n\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import List, Tuple, cast\nimport random\n\nimport pandas as pd\n\n# ArrayRecord\nfrom array_record.python import array_record_module as arm\n\nimport tensorflow as tf \nfrom serialize_common import (\n SerializeConfig,\n _session_to_transcript,\n _discover_local_sessions,\n _chunk_text,\n)\n\n\ndef to_array_record(\n cfg: SerializeConfig,\n) -> None:\n os.makedirs(cfg.output_dir, exist_ok=True)\n\n required_cols = [""Sequence"", ""Time"", ""File"", ""RangeOffset"", ""RangeLength"", ""Text"", ""Language"", ""Type""]\n\n # Build a list of per-session DataFrames with file paths\n session_dataframes: List[Tuple[pd.DataFrame, str]] = []\n root = Path(cast(str, cfg.csv_root)).expanduser().resolve()\n csv_files = _discover_local_sessions(root)\n assert csv_files, f""No CSV files found under {root}""\n for csv_file in csv_files:\n df = pd.read_csv(csv_file)\n missing_local = [c for c in required_cols if c not in df.columns]\n assert not missing_local, f""Missing required CSV columns in {csv_file}: {missing_local}""\n session_dataframes.append((df, str(csv_file)))\n\n random.seed(42)\n session_dataframes = [(df, path) for df, path in session_dataframes]\n random.shuffle(session_dataframes)\n \n total_sessions = len(session_dataframes)\n val_count = int(total_sessions * cfg.val_ratio)\n train_count = total_sessions - val_count\n\n train_rows = 0\n val_rows = 0\n train_shard_idx = 0\n val_shard_idx = 0\n docs_written = 0\n\n def write_shard(chunks: List[str], split: str, shard_idx: int) -> int:\n if not chunks:\n return 0\n out_path = Path(cfg.output_dir) / f""{split}_{shard_idx:05d}.array_record""\n group_size = cfg.arrayrecord_group_size\n options = f""group_size:{group_size}""\n writer = arm.ArrayRecordWriter(str(out_path), options)\n try:\n for chunk in chunks:\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n ""text"": tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[chunk.encode(""utf-8"")])\n )\n }\n )\n )\n writer.write(example.SerializeToString())\n finally:\n writer.close()\n return len(chunks)\n\n for i, (session_df, session_path) in enumerate(session_dataframes):\n session_df = pd.DataFrame(session_df.copy())\n transcript = _session_to_transcript(\n session_df,\n long_pause_threshold_ms=cfg.long_pause_threshold_ms,\n )\n # Skip sessions that are too short\n if len(transcript.strip()) < cfg.min_session_chars:\n print(f""Skipping session {session_path} because it's too short ({len(transcript.strip())} chars)"")\n continue\n chunks = _chunk_text(transcript, cfg.target_chars, cfg.overlap_chars)\n if not chunks:\n continue\n docs_written += len(chunks)\n \n # Write chunks to appropriate split based on position\n if i < train_count:\n rows_written = write_shard(chunks, ""train"", train_shard_idx)\n train_rows += rows_written\n train_shard_idx += 1\n else:\n rows_written = write_shard(chunks, ""val"", val_shard_idx)\n val_rows += rows_written\n val_shard_idx += 1\n \n if cfg.max_docs and docs_written >= cfg.max_docs:\n break\n\n print(f""Wrote {train_rows} train and {val_rows} val documents to {cfg.output_dir}"")\n\n\ndef parse_args() -> SerializeConfig:\n p = argparse.ArgumentParser(description=""Serialize HF CSV sessions to ArrayRecord for MaxText Grain"")\n p.add_argument(""--csv_root"", 
type=str, required=True, help=""Root directory containing per-session CSV files"")\n p.add_argument(""--output_dir"", type=str, required=True, help=""Output directory for ArrayRecord shards"")\n p.add_argument(""--shard_size"", type=int, default=20000, help=""Rows per shard (currently one session per shard)"")\n # FIXME(f.srambical): It is awkward that the target number is in character-space instead of in token-space.\n p.add_argument(""--target_chars"", type=int, default=8192, help=""Target characters per document chunk. This should be ~3-4x the max token length of the model you are using."")\n p.add_argument(""--overlap_chars"", type=int, default=128, help=""Character overlap between chunks"")\n p.add_argument(""--min_session_chars"", type=int, default=1024, help=""Minimum characters to keep a session"")\n p.add_argument(""--max_docs"", type=int, default=None, help=""Stop after writing this many unique docs"")\n p.add_argument(""--long_pause_threshold_ms"", type=int, default=120000, help=""Threshold (ms) to annotate long pauses and emit a keyframe"")\n p.add_argument(""--val_ratio"", type=float, default=0.10, help=""Fraction of sessions to route to validation [0,1)"")\n p.add_argument(""--arrayrecord_group_size"", type=int, default=1, help=""ArrayRecord group_size option controlling index granularity and compression grouping"")\n args = p.parse_args()\n return SerializeConfig(\n output_dir=args.output_dir,\n shard_size=args.shard_size,\n target_chars=args.target_chars,\n overlap_chars=args.overlap_chars,\n min_session_chars=args.min_session_chars,\n max_docs=args.max_docs,\n long_pause_threshold_ms=args.long_pause_threshold_ms,\n csv_root=(args.csv_root if args.csv_root else None),\n val_ratio=args.val_ratio,\n arrayrecord_group_size=args.arrayrecord_group_size,\n )\n\n\ndef main() -> None:\n cfg = parse_args()\n to_array_record(cfg)\n\n\nif __name__ == ""__main__"":\n main()",python,tab
|
| 3 |
+
2,164,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:59:11 PM [info] Activating crowd-code\n1:59:11 PM [info] Recording started\n1:59:11 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,207,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"1:59:12 PM [info] Git repository found\n1:59:12 PM [info] Git provider initialized successfully\n1:59:12 PM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,2166,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,tab
|
| 6 |
+
5,5754,"TERMINAL",0,0,"",,terminal_command
|
| 7 |
+
6,5798,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 8 |
+
7,8733,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,tab
|
| 9 |
+
8,8735,"TERMINAL",0,0,"",,terminal_focus
|
| 10 |
+
9,9286,"TERMINAL",0,0,"source /home/franz.srambical/crowd-pilot/.venv/bin/activate",,terminal_command
|
| 11 |
+
10,9289,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
|
| 12 |
+
11,10584,"TERMINAL",0,0,"squeue",,terminal_command
|
| 13 |
+
12,10602,"TERMINAL",0,0,"]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n 32793 mihir.maha interacti 1 2 R 2025-10-25T13:34:00 2025-10-25T13:34:00 25:22 2:00:00 hai004\r\n 32786 xiao.liu interacti 1 128 R 2025-10-25T10:25:10 2025-10-25T10:25:10 3:34:12 23:59:00 hai003\r\n 32778 xiao.liu interacti 1 128 R 2025-10-24T23:50:46 2025-10-24T23:50:46 14:08:36 23:59:00 hai004\r\n 32788 nishant.ku standard 3 96 R 2025-10-25T12:37:42 2025-10-25T13:29:21 30:01 1-00:00:00 hai[002,005,008]\r\n 32792 mihir.maha standard 1 10 R 2025-10-25T13:21:13 2025-10-25T13:21:13 38:09 1-00:00:00 hai003\r\n 32791 mihir.maha standard 1 10 R 2025-10-25T12:50:42 2025-10-25T12:51:01 1:08:21 1-00:00:00 hai004\r\n 32790 mihir.maha standard 1 10 R 2025-10-25T12:50:38 2025-10-25T12:50:41 1:08:41 1-00:00:00 hai003\r\n 32789 mihir.maha standard 1 10 R 2025-10-25T12:50:33 2025-10-25T12:50:36 1:08:46 1-00:00:00 hai003\r\n 32785 xiao.liu standard 1 128 R 2025-10-25T10:24:53 2025-10-25T10:25:04 3:34:18 23:59:00 hai001\r\n 32774 xiao.liu standard 1 128 R 2025-10-24T21:17:44 2025-10-24T21:17:44 16:41:38 23:59:00 hai007\r\n 32771 mihir.maha standard 1 32 R 2025-10-24T20:59:40 2025-10-24T20:59:43 16:59:39 1-00:00:00 hai004\r\n 32770 mihir.maha standard 1 32 R 2025-10-24T20:59:34 2025-10-24T20:59:39 16:59:43 1-00:00:00 hai003\r\n]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
|
| 14 |
+
13,16247,"TERMINAL",0,0,"",,terminal_focus
|
| 15 |
+
14,16752,"TERMINAL",0,0,"source /home/franz.srambical/crowd-pilot/.venv/bin/activate",,terminal_command
|
| 16 |
+
15,16755,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
|
| 17 |
+
16,106209,"TERMINAL",0,0,"",,terminal_command
|
| 18 |
+
17,664565,"TERMINAL",0,0,"deactivate",,terminal_command
|
| 19 |
+
18,666568,"TERMINAL",0,0,"cd maxtext/",,terminal_command
|
| 20 |
+
19,670083,"TERMINAL",0,0,"source /home/franz.srambical/crowd-pilot/maxtext/.venv/bin/activate",,terminal_command
|
| 21 |
+
20,671751,"TERMINAL",0,0,"cd ..",,terminal_command
|
| 22 |
+
21,674476,"TERMINAL",0,0,"tensorboard --logdir /fast/project/HFMI_SynergyUnit/jafar_ws/data/crowd-pilot/outputs/",,terminal_command
|
| 23 |
+
22,674527,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 24 |
+
23,674667,"TERMINAL",0,0,"/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard/default.py:30: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\r\n import pkg_resources\r\n",,terminal_output
|
| 25 |
+
24,676003,"TERMINAL",0,0,"2025-10-25 14:10:27.809040: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n2025-10-25 14:10:27.882376: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1761394227.895248 2018689 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1761394227.899603 2018689 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nW0000 00:00:1761394227.910331 2018689 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1761394227.910349 2018689 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1761394227.910351 2018689 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1761394227.910352 2018689 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n2025-10-25 14:10:27.913231: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\r\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\n",,terminal_output
|
| 26 |
+
25,682363,"TERMINAL",0,0,"2025-10-25 14:10:34.246902: E external/local_xla/xla/stream_executor/cuda/cuda_platform.cc:51] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303)\r\n",,terminal_output
|
| 27 |
+
26,682992,"TERMINAL",0,0,"\r\nNOTE: Using experimental fast data loading logic. To disable, pass\r\n ""--load_fast=false"" and report issues on GitHub. More details:\r\n https://github.com/tensorflow/tensorboard/issues/4784\r\n\r\nE1025 14:10:34.891064 140392782652480 application.py:125] Failed to load plugin ProfilePluginLoader.load; ignoring it.\r\nTraceback (most recent call last):\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard/backend/application.py"", line 123, in TensorBoardWSGIApp\r\n plugin = loader.load(context)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard_plugin_profile/profile_plugin_loader.py"", line 75, in load\r\n from tensorboard_plugin_profile import profile_plugin\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard_plugin_profile/profile_plugin.py"", line 36, in <module>\r\n from tensorboard_plugin_profile.convert import raw_to_tool_data as convert\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard_plugin_profile/convert/raw_to_tool_data.py"", line 29, in <module>\r\n from tensorboard_plugin_profile.convert import input_pipeline_proto_to_gviz\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard_plugin_profile/convert/input_pipeline_proto_to_gviz.py"", line 28, in <module>\r\n from tensorboard_plugin_profile.protobuf import input_pipeline_pb2\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard_plugin_profile/protobuf/input_pipeline_pb2.py"", line 17, in <module>\r\n from tensorboard_plugin_profile.protobuf import diagnostics_pb2 as plugin_dot_tensorboard__plugin__profile_dot_protobuf_dot_diagnostics__pb2\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/tensorboard_plugin_profile/protobuf/diagnostics_pb2.py"", line 36, in <module>\r\n _descriptor.FieldDescriptor(\r\n File ""/fast/home/franz.srambical/crowd-pilot/maxtext/.venv/lib/python3.12/site-packages/google/protobuf/descriptor.py"", line 621, in __new__\r\n _message.Message._CheckCalledFromGeneratedFile()\r\nTypeError: Descriptors cannot be created directly.\r\nIf this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.\r\nIf you cannot immediately regenerate your protos, some other possible workarounds are:\r\n 1. Downgrade the protobuf package to 3.20.x or lower.\r\n 2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).\r\n\r\nMore information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates\r\n",,terminal_output
|
| 28 |
+
27,683194,"TERMINAL",0,0,"Serving TensorBoard on localhost; to expose to the network, use a proxy or pass --bind_all\r\nTensorBoard 2.19.0 at http://localhost:6006/ (Press CTRL+C to quit)\r\n",,terminal_output
|
| 29 |
+
28,4979578,"crowd-pilot/serialize_dataset_array_record.py",23,0,"",python,selection_command
|
| 30 |
+
29,4979826,"crowd-pilot/serialize_dataset_array_record.py",27,0,"",python,selection_command
|
| 31 |
+
30,4979851,"crowd-pilot/serialize_dataset_array_record.py",93,0,"",python,selection_command
|
| 32 |
+
31,4979880,"crowd-pilot/serialize_dataset_array_record.py",97,0,"",python,selection_command
|
| 33 |
+
32,4979917,"crowd-pilot/serialize_dataset_array_record.py",98,0,"",python,selection_command
|
| 34 |
+
33,4979949,"crowd-pilot/serialize_dataset_array_record.py",133,0,"",python,selection_command
|
| 35 |
+
34,4979988,"crowd-pilot/serialize_dataset_array_record.py",134,0,"",python,selection_command
|
| 36 |
+
35,4980014,"crowd-pilot/serialize_dataset_array_record.py",150,0,"",python,selection_command
|
| 37 |
+
36,4980043,"crowd-pilot/serialize_dataset_array_record.py",160,0,"",python,selection_command
|
| 38 |
+
37,4980078,"crowd-pilot/serialize_dataset_array_record.py",185,0,"",python,selection_command
|
| 39 |
+
38,4980113,"crowd-pilot/serialize_dataset_array_record.py",222,0,"",python,selection_command
|
| 40 |
+
39,4980146,"crowd-pilot/serialize_dataset_array_record.py",236,0,"",python,selection_command
|
| 41 |
+
40,4980180,"crowd-pilot/serialize_dataset_array_record.py",237,0,"",python,selection_command
|
| 42 |
+
41,4980217,"crowd-pilot/serialize_dataset_array_record.py",257,0,"",python,selection_command
|
| 43 |
+
42,4980523,"crowd-pilot/serialize_dataset_array_record.py",258,0,"",python,selection_command
|
| 44 |
+
43,4980696,"crowd-pilot/serialize_dataset_array_record.py",272,0,"",python,selection_command
|
| 45 |
+
44,4981062,"crowd-pilot/serialize_dataset_array_record.py",277,0,"",python,selection_command
|
| 46 |
+
45,4981308,"crowd-pilot/serialize_dataset_array_record.py",289,0,"",python,selection_command
|
| 47 |
+
46,4981337,"crowd-pilot/serialize_dataset_array_record.py",290,0,"",python,selection_command
|
| 48 |
+
47,4981418,"crowd-pilot/serialize_dataset_array_record.py",297,0,"",python,selection_command
|
| 49 |
+
48,4981419,"crowd-pilot/serialize_dataset_array_record.py",304,0,"",python,selection_command
|
| 50 |
+
49,4981781,"crowd-pilot/serialize_dataset_array_record.py",330,0,"",python,selection_command
|
| 51 |
+
50,4982191,"crowd-pilot/serialize_dataset_array_record.py",330,0," ",python,content
|
| 52 |
+
51,4982191,"crowd-pilot/serialize_dataset_array_record.py",331,0,"",python,selection_keyboard
|
| 53 |
+
52,4982544,"crowd-pilot/serialize_dataset_array_record.py",331,0,"#",python,content
|
| 54 |
+
53,4982544,"crowd-pilot/serialize_dataset_array_record.py",332,0,"",python,selection_keyboard
|
| 55 |
+
54,4983408,"crowd-pilot/serialize_dataset_array_record.py",332,0," type: ignore",python,content
|
| 56 |
+
55,4983591,"crowd-pilot/serialize_dataset_array_record.py",344,0,"",python,selection_command
|
| 57 |
+
56,4984914,"crowd-pilot/serialize_dataset_array_record.py",272,0,"",python,selection_command
|
| 58 |
+
57,4985307,"crowd-pilot/serialize_dataset_array_record.py",346,0,"",python,selection_command
|
| 59 |
+
58,4985486,"crowd-pilot/serialize_dataset_array_record.py",272,0,"",python,selection_command
|
| 60 |
+
59,4985605,"crowd-pilot/serialize_dataset_array_record.py",258,0,"",python,selection_command
|
| 61 |
+
60,4986027,"crowd-pilot/serialize_dataset_array_record.py",258,14,"",python,content
|
| 62 |
+
61,4996494,"crowd-pilot/serialize_common.py",0,0,"#!/usr/bin/env python3\n""""""\nCommon utilities for dataset serialization scripts.\n""""""\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Dict\n\nimport pandas as pd\nfrom datasets import Dataset, load_dataset\n\n\n@dataclass\nclass SerializeConfig:\n output_dir: str\n shard_size: int\n target_chars: int\n overlap_chars: int\n min_session_chars: int\n max_docs: Optional[int]\n long_pause_threshold_ms: int\n csv_root: Optional[str]\n val_ratio: float\n arrayrecord_group_size: Optional[int] = None\n\n\ndef _clean_text(text: str) -> str:\n # Normalize line endings and strip trailing spaces; preserve tabs/newlines.\n return text.replace(""\r\n"", ""\n"").replace(""\r"", ""\n"").rstrip()\n\n\ndef _fenced_block(path: str, language: Optional[str], content: str) -> str:\n lang = (language or """").lower()\n return f""```{lang}\n{content}\n```\n""\n\n\ndef _apply_change(content: str, offset: int, length: int, new_text: str) -> str:\n # Mirrors crowd_code_player.replay_file.apply_change\n base = str(content)\n text = str(new_text) if pd.notna(new_text) else """"\n text = text.replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n if offset > len(base):\n base = base + ("" "" * (offset - len(base)))\n return base[:offset] + text + base[offset + length:]\n\n\ndef _session_to_transcript(\n df: pd.DataFrame,\n long_pause_threshold_ms: int,\n) -> str:\n\n file_states: Dict[str, str] = {}\n terminal_state: str = """"\n per_file_event_counts: Dict[str, int] = {}\n per_file_cursor_positions: Dict[str, Tuple[int, int]] = {} # (offset, length) for each file\n last_time_ms: Optional[int] = None\n\n parts: List[str] = []\n\n for i in range(len(df)):\n row = df.iloc[i]\n file_path: str = row[""File""]\n event_time: int = row[""Time""]\n language: Optional[str] = row[""Language""]\n\n # Long pause detection\n if last_time_ms is not None:\n delta = event_time - last_time_ms\n if delta > long_pause_threshold_ms:\n # TODO (f.srambical): think about whether we want to emit this as an observation or not\n parts.append(f""<obs long_pause ms=\""{delta}\"" />"")\n last_time_ms = event_time\n\n event_type = row[""Type""]\n\n match event_type:\n case ""tab"":\n # File switch event\n parts.append(f""<act focus file=\""{file_path}\"" />"")\n \n # If Text is present, this is the first time opening the file\n # and the entire file content is captured\n text = row[""Text""]\n if pd.notna(text):\n file_content = str(text).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n file_states[file_path] = file_content\n parts.append(f""// observation: file={file_path}"")\n parts.append(_fenced_block(file_path, language, _clean_text(file_content)))\n\n case ""terminal_command"":\n # Terminal command execution\n command = row[""Text""]\n command_str = str(command).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f""<act terminal_command />"")\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(command_str)))\n\n case ""terminal_output"":\n # Terminal output capture\n output = row[""Text""]\n output_str = str(output).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f""<obs terminal_output />"")\n parts.append(_fenced_block(file_path, None, _clean_text(output_str)))\n\n case ""terminal_focus"":\n # Terminal focus event\n parts.append(f""<act focus target=\""terminal\"" />"")\n\n case ""git_branch_checkout"":\n # Git branch checkout event\n branch_info = 
row[""Text""]\n branch_str = str(branch_info).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f""<act git_branch_checkout />"")\n parts.append(f""// git: {_clean_text(branch_str)}"")\n\n case ""selection_command"" | ""selection_mouse"" | ""selection_keyboard"":\n # Handle cursor movement\n offset = row[""RangeOffset""]\n length = row[""RangeLength""]\n old_cursor = per_file_cursor_positions.get(file_path, (0, 0))\n new_cursor = (offset, length)\n per_file_cursor_positions[file_path] = new_cursor\n \n # Emit cursor movement observation if position changed\n if old_cursor != new_cursor:\n parts.append(f""<act cursor file=\""{file_path}\"" offset=\""{offset}\"" len=\""{length}\"" />"")\n\n case ""content"":\n # Handle file edit events\n offset = row[""RangeOffset""]\n length = row[""RangeLength""]\n new_text = row[""Text""]\n new_text_str = str(new_text) if pd.notna(new_text) else """"\n\n operation = ""noop""\n if length == 0 and new_text_str:\n operation = ""insert""\n elif length > 0 and not new_text_str:\n operation = ""delete""\n elif length > 0 and new_text_str:\n operation = ""replace""\n\n parts.append(f""<act {operation} file=\""{file_path}\"" offset=\""{offset}\"" len=\""{length}\"" />"")\n\n if new_text_str and (operation == ""insert"" or operation == ""replace""):\n parts.append(_fenced_block(file_path, language, _clean_text(new_text_str)))\n\n before = file_states.get(file_path, """")\n after = _apply_change(before, offset, length, new_text)\n file_states[file_path] = after\n per_file_event_counts[file_path] = per_file_event_counts.get(file_path, 0) + 1\n\n # Update cursor position after edit (cursor moves to end of inserted/replaced text)\n per_file_cursor_positions[file_path] = (offset + len(new_text_str), 0)\n\n case _:\n raise ValueError(f""Unknown event type: {event_type}"")\n\n return ""\n"".join(parts).strip()\n\n\ndef load_hf_csv(hf_path: str, split: str) -> Dataset:\n loaded = load_dataset(hf_path, split=split)\n\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""\n return loaded\n\n\ndef _discover_local_sessions(root: Path) -> List[Path]:\n # Recursively find all CSV files\n paths: List[Path] = []\n for p in root.rglob(""*.csv""):\n if p.is_file():\n paths.append(p)\n paths.sort()\n return paths\n\n\ndef _chunk_text(text: str, target_chars: int, overlap_chars: int) -> List[str]:\n """"""Split a long text into overlapping chunks near target length.""""""\n if target_chars <= 0:\n return [text]\n n = len(text)\n if n <= target_chars:\n return [text]\n\n chunks: List[str] = []\n start = 0\n # Ensure sane overlap\n overlap = max(0, min(overlap_chars, target_chars // 2))\n while start < n:\n end_target = min(start + target_chars, n)\n if end_target < n:\n end = end_target\n else:\n end = n\n chunk = text[start:end].strip()\n chunks.append(chunk)\n if end == n:\n break\n # advance with overlap\n start = max(0, end - overlap)\n if start >= n:\n break\n return chunks\n\n\n",python,tab
|
| 63 |
+
62,5001372,"crowd-pilot/serialize_dataset_parquet.py",0,0,"#!/usr/bin/env python3\n""""""\nCSV sessions -> Parquet shards for MaxText Grain pretraining.\n""""""\n\nfrom __future__ import annotations\n\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import List, Tuple, cast\nimport random\n\nimport pandas as pd\n\nfrom serialize_common import (\n SerializeConfig,\n _session_to_transcript,\n _discover_local_sessions,\n _chunk_text,\n)\n\n\ndef load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset\n loaded = load_dataset(hf_path, split=split)\n from datasets import Dataset\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""\n return loaded\n\n\ndef to_parquet(\n cfg: SerializeConfig,\n) -> None:\n os.makedirs(cfg.output_dir, exist_ok=True)\n\n required_cols = [""Sequence"", ""Time"", ""File"", ""RangeOffset"", ""RangeLength"", ""Text"", ""Language"", ""Type""]\n\n # Build a list of per-session DataFrames with file paths\n session_dataframes: List[Tuple[pd.DataFrame, str]] = []\n root = Path(cast(str, cfg.csv_root)).expanduser().resolve()\n csv_files = _discover_local_sessions(root)\n assert csv_files, f""No CSV files found under {root}""\n for csv_file in csv_files:\n df = pd.read_csv(csv_file)\n missing_local = [c for c in required_cols if c not in df.columns]\n assert not missing_local, f""Missing required CSV columns in {csv_file}: {missing_local}""\n session_dataframes.append((df, str(csv_file)))\n\n random.seed(42)\n session_dataframes = [(df, path) for df, path in session_dataframes]\n random.shuffle(session_dataframes)\n \n total_sessions = len(session_dataframes)\n val_count = int(total_sessions * cfg.val_ratio)\n train_count = total_sessions - val_count\n\n train_rows = 0\n val_rows = 0\n train_shard_idx = 0\n val_shard_idx = 0\n docs_written = 0\n\n def write_shard(chunks: List[str], split: str, shard_idx: int) -> int:\n if not chunks:\n return 0\n df_out = pd.DataFrame({""text"": chunks})\n out_path = Path(cfg.output_dir) / f""{split}_{shard_idx:05d}.parquet""\n df_out.to_parquet(out_path, index=False)\n return len(df_out)\n\n for i, (session_df, session_path) in enumerate(session_dataframes):\n session_df = pd.DataFrame(session_df.copy())\n transcript = _session_to_transcript(\n session_df,\n long_pause_threshold_ms=cfg.long_pause_threshold_ms,\n )\n # Skip sessions that are too short\n if len(transcript.strip()) < cfg.min_session_chars:\n print(f""Skipping session {session_path} because it's too short ({len(transcript.strip())} chars)"")\n continue\n chunks = _chunk_text(transcript, cfg.target_chars, cfg.overlap_chars)\n if not chunks:\n continue\n docs_written += len(chunks)\n \n # Write chunks to appropriate split based on position\n if i < train_count:\n rows_written = write_shard(chunks, ""train"", train_shard_idx)\n train_rows += rows_written\n train_shard_idx += 1\n else:\n rows_written = write_shard(chunks, ""val"", val_shard_idx)\n val_rows += rows_written\n val_shard_idx += 1\n \n if cfg.max_docs and docs_written >= cfg.max_docs:\n break\n\n print(f""Wrote {train_rows} train and {val_rows} val documents to {cfg.output_dir}"")\n\n\ndef parse_args() -> SerializeConfig:\n p = argparse.ArgumentParser(description=""Serialize HF CSV sessions to Parquet for MaxText Grain"")\n p.add_argument(""--csv_root"", type=str, required=True, help=""Root directory containing per-session CSV files"")\n p.add_argument(""--output_dir"", type=str, required=True, help=""Output directory for Parquet shards"")\n 
p.add_argument(""--shard_size"", type=int, default=20000, help=""Rows per Parquet shard"")\n # FIXME(f.srambical): It is awkward that the target number is in character-space instead of in token-space.\n p.add_argument(""--target_chars"", type=int, default=8192, help=""Target characters per document chunk. This should be ~3-4x the max token length of the model you are using."")\n p.add_argument(""--overlap_chars"", type=int, default=128, help=""Character overlap between chunks"")\n p.add_argument(""--min_session_chars"", type=int, default=1024, help=""Minimum characters to keep a session"")\n p.add_argument(""--max_docs"", type=int, default=None, help=""Stop after writing this many unique docs"")\n p.add_argument(""--long_pause_threshold_ms"", type=int, default=120000, help=""Threshold (ms) to annotate long pauses and emit a keyframe"")\n p.add_argument(""--val_ratio"", type=float, default=0.10, help=""Fraction of sessions to route to validation [0,1)"")\n args = p.parse_args()\n return SerializeConfig(\n output_dir=args.output_dir,\n shard_size=args.shard_size,\n target_chars=args.target_chars,\n overlap_chars=args.overlap_chars,\n min_session_chars=args.min_session_chars,\n max_docs=args.max_docs,\n long_pause_threshold_ms=args.long_pause_threshold_ms,\n csv_root=(args.csv_root if args.csv_root else None),\n val_ratio=args.val_ratio,\n )\n\n\ndef main() -> None:\n cfg = parse_args()\n to_parquet(cfg)\n\n\nif __name__ == ""__main__"":\n main()",python,tab
|
| 64 |
+
63,5003894,"crowd-pilot/serialize_common.py",0,0,"",python,tab
|
| 65 |
+
64,5012583,"crowd-pilot/serialization_utils.py",0,0,"#!/usr/bin/env python3\n""""""\nCommon utilities for dataset serialization scripts.\n""""""\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Dict\n\nimport pandas as pd\nfrom datasets import Dataset, load_dataset\n\n\n@dataclass\nclass SerializeConfig:\n output_dir: str\n shard_size: int\n target_chars: int\n overlap_chars: int\n min_session_chars: int\n max_docs: Optional[int]\n long_pause_threshold_ms: int\n csv_root: Optional[str]\n val_ratio: float\n arrayrecord_group_size: Optional[int] = None\n\n\ndef _clean_text(text: str) -> str:\n # Normalize line endings and strip trailing spaces; preserve tabs/newlines.\n return text.replace(""\r\n"", ""\n"").replace(""\r"", ""\n"").rstrip()\n\n\ndef _fenced_block(path: str, language: Optional[str], content: str) -> str:\n lang = (language or """").lower()\n return f""```{lang}\n{content}\n```\n""\n\n\ndef _apply_change(content: str, offset: int, length: int, new_text: str) -> str:\n # Mirrors crowd_code_player.replay_file.apply_change\n base = str(content)\n text = str(new_text) if pd.notna(new_text) else """"\n text = text.replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n if offset > len(base):\n base = base + ("" "" * (offset - len(base)))\n return base[:offset] + text + base[offset + length:]\n\n\ndef _session_to_transcript(\n df: pd.DataFrame,\n long_pause_threshold_ms: int,\n) -> str:\n\n file_states: Dict[str, str] = {}\n terminal_state: str = """"\n per_file_event_counts: Dict[str, int] = {}\n per_file_cursor_positions: Dict[str, Tuple[int, int]] = {} # (offset, length) for each file\n last_time_ms: Optional[int] = None\n\n parts: List[str] = []\n\n for i in range(len(df)):\n row = df.iloc[i]\n file_path: str = row[""File""]\n event_time: int = row[""Time""]\n language: Optional[str] = row[""Language""]\n\n # Long pause detection\n if last_time_ms is not None:\n delta = event_time - last_time_ms\n if delta > long_pause_threshold_ms:\n # TODO (f.srambical): think about whether we want to emit this as an observation or not\n parts.append(f""<obs long_pause ms=\""{delta}\"" />"")\n last_time_ms = event_time\n\n event_type = row[""Type""]\n\n match event_type:\n case ""tab"":\n # File switch event\n parts.append(f""<act focus file=\""{file_path}\"" />"")\n \n # If Text is present, this is the first time opening the file\n # and the entire file content is captured\n text = row[""Text""]\n if pd.notna(text):\n file_content = str(text).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n file_states[file_path] = file_content\n parts.append(f""// observation: file={file_path}"")\n parts.append(_fenced_block(file_path, language, _clean_text(file_content)))\n\n case ""terminal_command"":\n # Terminal command execution\n command = row[""Text""]\n command_str = str(command).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f""<act terminal_command />"")\n parts.append(_fenced_block(file_path, ""bash"", _clean_text(command_str)))\n\n case ""terminal_output"":\n # Terminal output capture\n output = row[""Text""]\n output_str = str(output).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f""<obs terminal_output />"")\n parts.append(_fenced_block(file_path, None, _clean_text(output_str)))\n\n case ""terminal_focus"":\n # Terminal focus event\n parts.append(f""<act focus target=\""terminal\"" />"")\n\n case ""git_branch_checkout"":\n # Git branch checkout event\n branch_info = 
row[""Text""]\n branch_str = str(branch_info).replace(""\\n"", ""\n"").replace(""\\r"", ""\r"")\n parts.append(f""<act git_branch_checkout />"")\n parts.append(f""// git: {_clean_text(branch_str)}"")\n\n case ""selection_command"" | ""selection_mouse"" | ""selection_keyboard"":\n # Handle cursor movement\n offset = row[""RangeOffset""]\n length = row[""RangeLength""]\n old_cursor = per_file_cursor_positions.get(file_path, (0, 0))\n new_cursor = (offset, length)\n per_file_cursor_positions[file_path] = new_cursor\n \n # Emit cursor movement observation if position changed\n if old_cursor != new_cursor:\n parts.append(f""<act cursor file=\""{file_path}\"" offset=\""{offset}\"" len=\""{length}\"" />"")\n\n case ""content"":\n # Handle file edit events\n offset = row[""RangeOffset""]\n length = row[""RangeLength""]\n new_text = row[""Text""]\n new_text_str = str(new_text) if pd.notna(new_text) else """"\n\n operation = ""noop""\n if length == 0 and new_text_str:\n operation = ""insert""\n elif length > 0 and not new_text_str:\n operation = ""delete""\n elif length > 0 and new_text_str:\n operation = ""replace""\n\n parts.append(f""<act {operation} file=\""{file_path}\"" offset=\""{offset}\"" len=\""{length}\"" />"")\n\n if new_text_str and (operation == ""insert"" or operation == ""replace""):\n parts.append(_fenced_block(file_path, language, _clean_text(new_text_str)))\n\n before = file_states.get(file_path, """")\n after = _apply_change(before, offset, length, new_text)\n file_states[file_path] = after\n per_file_event_counts[file_path] = per_file_event_counts.get(file_path, 0) + 1\n\n # Update cursor position after edit (cursor moves to end of inserted/replaced text)\n per_file_cursor_positions[file_path] = (offset + len(new_text_str), 0)\n\n case _:\n raise ValueError(f""Unknown event type: {event_type}"")\n\n return ""\n"".join(parts).strip()\n\n\ndef load_hf_csv(hf_path: str, split: str) -> Dataset:\n loaded = load_dataset(hf_path, split=split)\n\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""\n return loaded\n\n\ndef _discover_local_sessions(root: Path) -> List[Path]:\n # Recursively find all CSV files\n paths: List[Path] = []\n for p in root.rglob(""*.csv""):\n if p.is_file():\n paths.append(p)\n paths.sort()\n return paths\n\n\ndef _chunk_text(text: str, target_chars: int, overlap_chars: int) -> List[str]:\n """"""Split a long text into overlapping chunks near target length.""""""\n if target_chars <= 0:\n return [text]\n n = len(text)\n if n <= target_chars:\n return [text]\n\n chunks: List[str] = []\n start = 0\n # Ensure sane overlap\n overlap = max(0, min(overlap_chars, target_chars // 2))\n while start < n:\n end_target = min(start + target_chars, n)\n if end_target < n:\n end = end_target\n else:\n end = n\n chunk = text[start:end].strip()\n chunks.append(chunk)\n if end == n:\n break\n # advance with overlap\n start = max(0, end - overlap)\n if start >= n:\n break\n return chunks\n\n\n",python,tab
|
| 66 |
+
65,5014532,"crowd-pilot/serialize_dataset_parquet.py",0,0,"",python,tab
|
| 67 |
+
66,5016377,"crowd-pilot/serialize_dataset_parquet.py",273,0,"",python,selection_mouse
|
| 68 |
+
67,5017076,"crowd-pilot/serialize_dataset_parquet.py",275,0,"_utils",python,content
|
| 69 |
+
68,5017076,"crowd-pilot/serialize_dataset_parquet.py",273,0,"ati",python,content
|
| 70 |
+
69,5017076,"crowd-pilot/serialize_dataset_parquet.py",267,6,"",python,content
|
| 71 |
+
70,5021666,"crowd-pilot/serialize_dataset_parquet.py",392,0,"",python,selection_command
|
| 72 |
+
71,5023171,"crowd-pilot/serialize_dataset_parquet.py",388,42,"def load_hf_csv(hf_path: str, split: str):",python,selection_command
|
| 73 |
+
72,5023324,"crowd-pilot/serialize_dataset_parquet.py",388,80,"def load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset",python,selection_command
|
| 74 |
+
73,5023431,"crowd-pilot/serialize_dataset_parquet.py",388,128,"def load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset\n loaded = load_dataset(hf_path, split=split)",python,selection_command
|
| 75 |
+
74,5023565,"crowd-pilot/serialize_dataset_parquet.py",388,161,"def load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset\n loaded = load_dataset(hf_path, split=split)\n from datasets import Dataset",python,selection_command
|
| 76 |
+
75,5023701,"crowd-pilot/serialize_dataset_parquet.py",388,240,"def load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset\n loaded = load_dataset(hf_path, split=split)\n from datasets import Dataset\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""",python,selection_command
|
| 77 |
+
76,5023850,"crowd-pilot/serialize_dataset_parquet.py",388,258,"def load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset\n loaded = load_dataset(hf_path, split=split)\n from datasets import Dataset\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""\n return loaded",python,selection_command
|
| 78 |
+
77,5023969,"crowd-pilot/serialize_dataset_parquet.py",388,259,"def load_hf_csv(hf_path: str, split: str):\n from datasets import load_dataset\n loaded = load_dataset(hf_path, split=split)\n from datasets import Dataset\n assert isinstance(loaded, Dataset), ""Expected a Dataset from load_dataset""\n return loaded\n",python,selection_command
|
| 79 |
+
78,5024175,"crowd-pilot/serialize_dataset_parquet.py",388,260,"",python,content
|
| 80 |
+
79,5025922,"crowd-pilot/serialize_dataset_parquet.py",388,1,"",python,content
|
| 81 |
+
80,5027834,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,tab
|
| 82 |
+
81,5029035,"crowd-pilot/serialize_dataset_array_record.py",371,0,"",python,selection_mouse
|
| 83 |
+
82,5029827,"crowd-pilot/serialize_dataset_array_record.py",379,0,"_utils",python,content
|
| 84 |
+
83,5029827,"crowd-pilot/serialize_dataset_array_record.py",377,0,"ati",python,content
|
| 85 |
+
84,5029827,"crowd-pilot/serialize_dataset_array_record.py",371,6,"",python,content
|
| 86 |
+
85,5032437,"crowd-pilot/serialize_dataset_parquet.py",0,0,"",python,tab
|
| 87 |
+
86,5033705,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,tab
|
| 88 |
+
87,5034721,"crowd-pilot/serialize_dataset_parquet.py",0,0,"",python,tab
|
| 89 |
+
88,5035584,"crowd-pilot/serialize_dataset_parquet.py",386,0,"",python,selection_mouse
|
| 90 |
+
89,5035985,"crowd-pilot/serialize_dataset_parquet.py",387,0,"",python,selection_mouse
|
| 91 |
+
90,5036957,"crowd-pilot/serialization_utils.py",0,0,"",python,tab
|
| 92 |
+
91,5037618,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,tab
|
| 93 |
+
92,5038243,"crowd-pilot/serialize_dataset_parquet.py",0,0,"",python,tab
|
| 94 |
+
93,5041451,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,tab
|
| 95 |
+
94,5045564,"crowd-pilot/serialize_dataset_array_record.py",732,0,"",python,selection_mouse
|
| 96 |
+
95,5047550,"crowd-pilot/serialize_dataset_array_record.py",706,61,"",python,content
|
| 97 |
+
96,5047553,"crowd-pilot/serialize_dataset_array_record.py",710,0,"",python,selection_command
|
| 98 |
+
97,5048592,"crowd-pilot/serialize_dataset_array_record.py",2687,0,"",python,selection_keyboard
|
| 99 |
+
98,5049995,"crowd-pilot/serialize_dataset_array_record.py",5616,0,"",python,selection_keyboard
|
| 100 |
+
99,5051506,"crowd-pilot/serialize_dataset_array_record.py",5882,0,"",python,selection_keyboard
|
| 101 |
+
100,5051768,"crowd-pilot/serialize_dataset_array_record.py",3188,0,"",python,selection_keyboard
|
| 102 |
+
101,5055917,"crowd-pilot/serialize_dataset_array_record.py",3184,62,"",python,content
|
| 103 |
+
102,5055922,"crowd-pilot/serialize_dataset_array_record.py",3192,0,"",python,selection_command
|
| 104 |
+
103,5056935,"crowd-pilot/serialize_dataset_array_record.py",3182,0,"",python,selection_command
|
| 105 |
+
104,5057188,"crowd-pilot/serialize_dataset_array_record.py",3147,0,"",python,selection_command
|
| 106 |
+
105,5057212,"crowd-pilot/serialize_dataset_array_record.py",3126,0,"",python,selection_command
|
| 107 |
+
106,5057251,"crowd-pilot/serialize_dataset_array_record.py",3103,0,"",python,selection_command
|
| 108 |
+
107,5057276,"crowd-pilot/serialize_dataset_array_record.py",3025,0,"",python,selection_command
|
| 109 |
+
108,5057313,"crowd-pilot/serialize_dataset_array_record.py",3004,0,"",python,selection_command
|
| 110 |
+
109,5057344,"crowd-pilot/serialize_dataset_array_record.py",2893,0,"",python,selection_command
|
| 111 |
+
110,5057381,"crowd-pilot/serialize_dataset_array_record.py",2833,0,"",python,selection_command
|
| 112 |
+
111,5057412,"crowd-pilot/serialize_dataset_array_record.py",2790,0,"",python,selection_command
|
| 113 |
+
112,5057567,"crowd-pilot/serialize_dataset_array_record.py",2780,0,"",python,selection_command
|
| 114 |
+
113,5058149,"crowd-pilot/serialize_dataset_array_record.py",2790,0,"",python,selection_command
|
| 115 |
+
114,5059033,"crowd-pilot/serialize_dataset_array_record.py",2782,43,"",python,content
|
| 116 |
+
115,5059045,"crowd-pilot/serialize_dataset_array_record.py",2790,0,"",python,selection_command
|
| 117 |
+
116,5059737,"crowd-pilot/serialize_dataset_array_record.py",885,0,"",python,selection_keyboard
|
| 118 |
+
117,5060427,"crowd-pilot/serialize_dataset_array_record.py",0,0,"",python,selection_keyboard
|
| 119 |
+
118,5063331,"crowd-pilot/serialize_dataset_parquet.py",0,0,"",python,tab
|
| 120 |
+
119,5064181,"crowd-pilot/serialize_dataset_parquet.py",1540,0,"",python,selection_mouse
|
| 121 |
+
120,5064182,"crowd-pilot/serialize_dataset_parquet.py",1539,0,"",python,selection_command
|
| 122 |
+
121,5064498,"crowd-pilot/serialize_dataset_parquet.py",0,0,"",python,selection_command
|
| 123 |
+
122,5066771,"crowd-pilot/serialize_dataset_parquet.py",646,0,"",python,selection_mouse
|
| 124 |
+
123,5067311,"crowd-pilot/serialize_dataset_parquet.py",597,61,"",python,content
|
| 125 |
+
124,5067326,"crowd-pilot/serialize_dataset_parquet.py",601,0,"",python,selection_command
|
| 126 |
+
125,5068969,"crowd-pilot/serialize_dataset_parquet.py",2128,0,"",python,selection_mouse
|
| 127 |
+
126,5069405,"crowd-pilot/serialize_dataset_parquet.py",2092,43,"",python,content
|
| 128 |
+
127,5069418,"crowd-pilot/serialize_dataset_parquet.py",2100,0,"",python,selection_command
|
| 129 |
+
128,5070221,"crowd-pilot/serialize_dataset_parquet.py",2484,0,"",python,selection_mouse
|
| 130 |
+
129,5070658,"crowd-pilot/serialize_dataset_parquet.py",2451,62,"",python,content
|
| 131 |
+
130,5070669,"crowd-pilot/serialize_dataset_parquet.py",2459,0,"",python,selection_command
|
| 132 |
+
131,5074248,"crowd-pilot/serialize_dataset_parquet.py",1873,0,"",python,selection_mouse
|
| 133 |
+
132,5075167,"crowd-pilot/serialize_dataset_parquet.py",1947,0,"",python,selection_mouse
|
| 134 |
+
133,5075168,"crowd-pilot/serialize_dataset_parquet.py",1946,0,"",python,selection_command
|
| 135 |
+
134,5075694,"crowd-pilot/serialize_dataset_parquet.py",1873,0,"",python,selection_mouse
|
| 136 |
+
135,5077634,"crowd-pilot/serialize_dataset_parquet.py",1877,0,"",python,selection_mouse
|
| 137 |
+
136,5085591,"crowd-pilot/serialize_dataset_parquet.py",386,0,"",python,selection_mouse
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-38e9e69b-6954-425b-902a-36a03773ab2b1767632090892-2026_01_05-17.54.57.36/source.csv
ADDED
|
@@ -0,0 +1,218 @@
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"crates/cli/src/main.rs",0,0,"//! CLI tool for serializing crowd-pilot IDE interaction data.\n//!\n//! This tool processes CSV session files and outputs JSONL format suitable for\n//! Miles SFT training. It uses the HuggingFace tokenizers Rust library for\n//! accurate token counting.\n\nuse std::path::PathBuf;\n\nuse clap::Parser;\nuse tokenizers::Tokenizer as HfTokenizer;\n\nuse crowd_pilot_serializer_core::{\n default_system_prompt,\n pipeline::{PipelineConfig, PipelineResult},\n process_all_sessions, write_jsonl_output, Tokenizer,\n};\n\n/// Serialize crowd-pilot CSV sessions to Miles JSONL format.\n#[derive(Parser, Debug)]\n#[command(name = ""crowd-pilot-serialize"")]\n#[command(author, version, about, long_about = None)]\nstruct Args {\n /// Root directory containing CSV session files\n #[arg(long)]\n csv_root: PathBuf,\n\n /// Output directory for JSONL files\n #[arg(long)]\n output_dir: PathBuf,\n\n /// HuggingFace tokenizer model name or path\n #[arg(long)]\n tokenizer: String,\n\n /// Maximum tokens per conversation chunk\n #[arg(long, default_value = ""8192"")]\n max_tokens_per_conversation: usize,\n\n /// Maximum tokens per message\n #[arg(long, default_value = ""2048"")]\n max_tokens_per_message: usize,\n\n /// Minimum messages required to keep a conversation\n #[arg(long, default_value = ""5"")]\n min_conversation_messages: usize,\n\n /// Viewport radius (lines above/below cursor)\n #[arg(long, default_value = ""10"")]\n viewport_radius: usize,\n\n /// Coalesce radius for grouping nearby edits\n #[arg(long, default_value = ""5"")]\n coalesce_radius: usize,\n\n /// Fraction of sessions for validation (0.0-1.0)\n #[arg(long, default_value = ""0.1"")]\n val_ratio: f64,\n\n /// Custom system prompt (optional)\n #[arg(long)]\n system_prompt: Option<String>,\n}\n\n/// Wrapper around HuggingFace tokenizers for token counting and truncation.\n///\n/// This uses the Rust-native tokenizers library, which is `Send + Sync`\n/// and enables true parallel tokenization without the Python GIL.\nstruct RustTokenizer {\n inner: HfTokenizer,\n}\n\nimpl RustTokenizer {\n /// Load a HuggingFace tokenizer from a model name or path.\n fn load(model_name: &str) -> Result<Self, Box<dyn std::error::Error>> {\n let inner = HfTokenizer::from_pretrained(model_name, None)\n .map_err(|e| e as Box<dyn std::error::Error>)?;\n Ok(Self { inner })\n }\n}\n\nimpl Tokenizer for RustTokenizer {\n fn count_tokens(&self, text: &str) -> usize {\n self.inner\n .encode(text, false)\n .expect(""Failed to encode text with tokenizer"")\n .get_ids()\n .len()\n }\n\n fn truncate_to_max_tokens(&self, text: &str, max_tokens: usize) -> String {\n let encoding = self.inner\n .encode(text, false)\n .expect(""Failed to encode text with tokenizer"");\n \n let ids = encoding.get_ids();\n if ids.len() <= max_tokens {\n return text.to_string();\n }\n \n let truncated_ids: Vec<u32> = ids[..max_tokens].to_vec();\n self.inner\n .decode(&truncated_ids, true)\n .expect(""Failed to decode truncated tokens"")\n }\n}\n\nfn main() -> Result<(), Box<dyn std::error::Error>> {\n let args = Args::parse();\n\n println!(""Loading tokenizer from {}..."", args.tokenizer);\n let tokenizer = RustTokenizer::load(&args.tokenizer)?;\n\n let config = PipelineConfig {\n max_tokens_per_conversation: args.max_tokens_per_conversation,\n max_tokens_per_message: args.max_tokens_per_message,\n min_conversation_messages: args.min_conversation_messages,\n viewport_radius: args.viewport_radius,\n coalesce_radius: args.coalesce_radius,\n val_ratio: 
args.val_ratio,\n };\n\n println!(""Processing CSV files from {:?}..."", args.csv_root);\n let session_results = process_all_sessions(\n &args.csv_root,\n &tokenizer,\n &config,\n )?;\n\n let total_sessions = session_results.len();\n println!(""Processed {} sessions"", total_sessions);\n\n let default_prompt = default_system_prompt(args.viewport_radius);\n let system_prompt = args.system_prompt.as_deref().unwrap_or(&default_prompt);\n\n println!(""Writing output to {:?}..."", args.output_dir);\n let result: PipelineResult = write_jsonl_output(\n session_results,\n &args.output_dir,\n args.val_ratio,\n system_prompt,\n )?;\n\n let metadata_path = args.output_dir.join(""metadata.json"");\n let metadata = serde_json::json!({\n ""config"": {\n ""csv_root"": args.csv_root.to_string_lossy(),\n ""output_dir"": args.output_dir.to_string_lossy(),\n ""tokenizer"": args.tokenizer,\n ""max_tokens_per_conversation"": args.max_tokens_per_conversation,\n ""max_tokens_per_message"": args.max_tokens_per_message,\n ""min_conversation_messages"": args.min_conversation_messages,\n ""viewport_radius"": args.viewport_radius,\n ""coalesce_radius"": args.coalesce_radius,\n ""val_ratio"": args.val_ratio,\n },\n ""counts"": {\n ""total_sessions"": result.total_sessions,\n ""total_conversations"": result.total_conversations,\n ""train_conversations"": result.train_conversations,\n ""val_conversations"": result.val_conversations,\n },\n ""stats"": {\n ""total_messages"": result.total_messages,\n ""total_tokens"": result.total_tokens,\n ""avg_messages_per_conversation"": if result.total_conversations > 0 {\n result.total_messages as f64 / result.total_conversations as f64\n } else {\n 0.0\n },\n ""avg_tokens_per_conversation"": if result.total_conversations > 0 {\n result.total_tokens as f64 / result.total_conversations as f64\n } else {\n 0.0\n },\n },\n ""files"": {\n ""train_path"": args.output_dir.join(""training.jsonl"").to_string_lossy(),\n ""val_path"": args.output_dir.join(""validation.jsonl"").to_string_lossy(),\n },\n });\n std::fs::write(&metadata_path, serde_json::to_string_pretty(&metadata)?)?;\n\n println!(""\n[summary]"");\n println!("" Total sessions processed: {}"", result.total_sessions);\n println!("" Train conversations: {}"", result.train_conversations);\n println!("" Val conversations: {}"", result.val_conversations);\n println!("" Total messages: {}"", result.total_messages);\n println!("" Total tokens: {}"", result.total_tokens);\n println!("" Output: {:?}/{{training,validation}}.jsonl"", args.output_dir);\n println!("" Metadata: {:?}"", metadata_path);\n\n Ok(())\n}\n",rust,tab
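The Rust CLI above wraps the HuggingFace `tokenizers` library for token counting and truncation. The same two operations can be sketched with the Python `tokenizers` package; the model name `gpt2` is only an example here, not the tokenizer used in the recording:

```python
# Illustrative Python counterpart to the RustTokenizer wrapper above.
from tokenizers import Tokenizer

tok = Tokenizer.from_pretrained("gpt2")  # any HF tokenizer identifier works

def count_tokens(text: str) -> int:
    return len(tok.encode(text, add_special_tokens=False).ids)

def truncate_to_max_tokens(text: str, max_tokens: int) -> str:
    ids = tok.encode(text, add_special_tokens=False).ids
    if len(ids) <= max_tokens:
        return text
    return tok.decode(ids[:max_tokens], skip_special_tokens=True)

print(count_tokens('fn main() { println!("hello"); }'))
```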
|
| 3 |
+
2,271,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:54:57 PM [info] Activating crowd-code\n5:54:57 PM [info] Recording started\n5:54:57 PM [info] Initializing git provider using file system watchers...\n5:54:57 PM [info] Git repository found\n5:54:57 PM [info] Git provider initialized successfully\n5:54:57 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 4 |
+
3,2081,"crates/cli/src/main.rs",0,0,"",rust,tab
|
| 5 |
+
4,2483,"crates/cli/src/main.rs",686,0,"",rust,selection_command
|
| 6 |
+
5,2560,"crates/cli/src/main.rs",706,0,"",rust,selection_command
|
| 7 |
+
6,322570,"/home/franz.srambical/crowd-code/src/extension.ts",0,0,"import * as vscode from 'vscode'\nimport * as crypto from 'crypto'\nimport { getExportPath, logToOutput, outputChannel, addToGitignore } from './utilities'\nimport {\n\tupdateStatusBarItem,\n\tstartRecording,\n\tstopRecording,\n\tisCurrentFileExported,\n\tcommands,\n\trecording,\n\taddToFileQueue,\n\tbuildCsvRow,\n\tappendToFile,\n\tpanicButton,\n} from './recording'\nimport { ChangeType, CSVRowBuilder } from './types'\nimport { RecordFilesProvider, type RecordFile } from './recordFilesProvider'\nimport { ActionsProvider } from './actionsProvider'\nimport { initializeGitProvider, cleanupGitProvider } from './gitProvider'\nimport * as fs from 'node:fs'\nimport * as path from 'node:path'\nimport { showConsentChangeDialog, ensureConsent, hasConsent } from './consent'\n\nexport let statusBarItem: vscode.StatusBarItem\nexport let extContext: vscode.ExtensionContext\nexport let actionsProvider: ActionsProvider\n\nfunction onConfigurationChange(event: vscode.ConfigurationChangeEvent) {\n\tif (event.affectsConfiguration('crowdCode')) {\n\t\tupdateStatusBarItem()\n\t\tgetExportPath()\n\t}\n}\n\n/**\n * Gets the full path for a file or folder\n * @param item - The tree item representing the file or folder\n * @param exportPath - The base export path\n * @returns The full path to the file or folder\n */\nfunction getFullPath(item: RecordFile, exportPath: string): string {\n\t// If the item has a parent path (file inside a folder), construct the full path\n\tif (item.parentPath) {\n\t\treturn path.join(exportPath, item.parentPath, item.label)\n\t}\n\t// Otherwise, it's a root item\n\treturn path.join(exportPath, item.label)\n}\n\n/**\n * Deletes a file or folder recursively\n * @param filePath - The path to the file or folder to delete\n */\nasync function deleteFileOrFolder(filePath: string): Promise<void> {\n\ttry {\n\t\tconst stat = fs.statSync(filePath)\n\t\tif (stat.isDirectory()) {\n\t\t\t// Delete directory and its contents recursively\n\t\t\tfs.rmSync(filePath, { recursive: true, force: true })\n\t\t} else {\n\t\t\t// Delete single file\n\t\t\tfs.unlinkSync(filePath)\n\t\t}\n\t} catch (err) {\n\t\tconsole.error('Error deleting file or folder:', err)\n\t\tthrow err\n\t}\n}\n\nexport async function activate(context: vscode.ExtensionContext): Promise<void> {\n\textContext = context\n\toutputChannel.show()\n\tlogToOutput('Activating crowd-code', 'info')\n\n\t// Save anonUserId globally for user to copy\n\tconst userName = process.env.USER || process.env.USERNAME || ""coder"";\n\tconst machineId = vscode.env.machineId ?? null;\n\tconst rawId = `${machineId}:${userName}`;\n\tconst anonUserId = crypto.createHash('sha256').update(rawId).digest('hex') as string;\n\n\textContext.globalState.update('userId', anonUserId);\n\n\t// Register userID display\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand('crowd-code.showUserId', () => {\n\t\t\tconst userId = extContext.globalState.get<string>('userId');\n\t\t\tif (!userId) {\n\t\t\t\tvscode.window.showWarningMessage(""User ID not registered yet. 
Please wait a few seconds until the extension is fully activated."");\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tvscode.window.showInformationMessage(`Your User ID is: ${userId}`);\n\t\t}))\n\n\n\t// Register Record Files Provider\n\tconst recordFilesProvider = new RecordFilesProvider()\n\tcontext.subscriptions.push(\n\t\tvscode.window.registerTreeDataProvider('recordFiles', recordFilesProvider)\n\t)\n\n\t// Register Actions Provider\n\tactionsProvider = new ActionsProvider()\n\tcontext.subscriptions.push(vscode.window.registerTreeDataProvider('actions', actionsProvider))\n\n\t// Register refresh command\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand('crowd-code.refreshRecordFiles', () => {\n\t\t\trecordFilesProvider.refresh()\n\t\t})\n\t)\n\n\t// Register delete command\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand(\n\t\t\t'crowd-code.deleteRecordFile',\n\t\t\tasync (item: RecordFile) => {\n\t\t\t\tconst exportPath = getExportPath()\n\t\t\t\tif (!exportPath) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tconst result = await vscode.window.showWarningMessage(\n\t\t\t\t\t`Are you sure you want to delete ${item.label}?`,\n\t\t\t\t\t'Yes',\n\t\t\t\t\t'No'\n\t\t\t\t)\n\n\t\t\t\tif (result === 'Yes') {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst itemPath = getFullPath(item, exportPath)\n\t\t\t\t\t\tawait deleteFileOrFolder(itemPath)\n\t\t\t\t\t\trecordFilesProvider.refresh()\n\t\t\t\t\t} catch (err) {\n\t\t\t\t\t\tvscode.window.showErrorMessage(`Error deleting ${item.label}: ${err}`)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t)\n\t)\n\n\t// Register reveal in explorer command\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand('crowd-code.revealInExplorer', (item: RecordFile) => {\n\t\t\tconst exportPath = getExportPath()\n\t\t\tif (!exportPath) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tconst itemPath = getFullPath(item, exportPath)\n\t\t\tvscode.commands.executeCommand('revealFileInOS', vscode.Uri.file(itemPath))\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand(commands.startRecording, () => {\n\t\t\tstartRecording()\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand(commands.stopRecording, () => {\n\t\t\tstopRecording()\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand(commands.panicButton, () => {\n\t\t\tpanicButton()\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand(commands.openSettings, () => {\n\t\t\tvscode.commands.executeCommand(\n\t\t\t\t'workbench.action.openSettings',\n\t\t\t\t'@ext:MattiaConsiglio.crowd-code'\n\t\t\t)\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand('crowd-code.addToGitignore', async () => {\n\t\t\tawait addToGitignore()\n\t\t})\n\t)\n\n\t// Register consent management command\n\tcontext.subscriptions.push(\n\t\tvscode.commands.registerCommand('crowd-code.consent', async () => {\n\t\t\tawait showConsentChangeDialog()\n\t\t})\n\t)\n\n\n\tcontext.subscriptions.push(vscode.workspace.onDidChangeConfiguration(onConfigurationChange))\n\n\tvscode.window.onDidChangeActiveTextEditor(editor => {\n\t\tupdateStatusBarItem()\n\t\tif (editor && recording.isRecording) {\n\t\t\tif (isCurrentFileExported()) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconst currentFileUri = editor.document.uri.toString()\n\t\t\tlet tabEventText = ''\n\n\t\t\tif (recording.activatedFiles) {\n\t\t\t\tif (!recording.activatedFiles.has(currentFileUri)) {\n\t\t\t\t\ttabEventText = 
editor.document.getText()\n\t\t\t\t\trecording.activatedFiles.add(currentFileUri)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tthrow new Error(""Warning: recording.activatedFiles was not available during TAB event logging."")\n\t\t\t}\n\n\t\t\trecording.sequence++\n\t\t\taddToFileQueue(\n\t\t\t\tbuildCsvRow({\n\t\t\t\t\tsequence: recording.sequence,\n\t\t\t\t\trangeOffset: 0,\n\t\t\t\t\trangeLength: 0,\n\t\t\t\t\ttext: tabEventText,\n\t\t\t\t\ttype: ChangeType.TAB,\n\t\t\t\t})\n\t\t\t)\n\t\t\tappendToFile()\n\t\t\tactionsProvider.setCurrentFile(editor.document.fileName)\n\t\t}\n\t})\n\n\tcontext.subscriptions.push(\n\t\tvscode.window.onDidChangeTextEditorSelection(event => {\n\t\t\tif (recording.isRecording && event.textEditor === vscode.window.activeTextEditor) {\n\t\t\t\tif (isCurrentFileExported()) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tconst editor = event.textEditor\n\t\t\t\t// For simplicity, we'll log the primary selection.\n\t\t\t\tconst selection = event.selections[0]\n\t\t\t\tif (!selection) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tconst selectedText = editor.document.getText(selection)\n\t\t\t\tlet changeType: string\n\n\t\t\t\tswitch (event.kind) {\n\t\t\t\t\tcase vscode.TextEditorSelectionChangeKind.Keyboard:\n\t\t\t\t\t\tchangeType = ChangeType.SELECTION_KEYBOARD\n\t\t\t\t\t\tbreak\n\t\t\t\t\tcase vscode.TextEditorSelectionChangeKind.Mouse:\n\t\t\t\t\t\tchangeType = ChangeType.SELECTION_MOUSE\n\t\t\t\t\t\tbreak\n\t\t\t\t\tcase vscode.TextEditorSelectionChangeKind.Command:\n\t\t\t\t\t\tchangeType = ChangeType.SELECTION_COMMAND\n\t\t\t\t\t\tbreak\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tthrow new TypeError(""Unknown selection change kind."")\n\t\t\t\t}\n\n\t\t\t\trecording.sequence++\n\t\t\t\tconst csvRowParams: CSVRowBuilder = {\n\t\t\t\t\tsequence: recording.sequence,\n\t\t\t\t\trangeOffset: editor.document.offsetAt(selection.start),\n\t\t\t\t\trangeLength: editor.document.offsetAt(selection.end) - editor.document.offsetAt(selection.start),\n\t\t\t\t\ttext: selectedText,\n\t\t\t\t\ttype: changeType,\n\t\t\t\t}\n\t\t\t\taddToFileQueue(buildCsvRow(csvRowParams))\n\t\t\t\tappendToFile()\n\t\t\t\tactionsProvider.setCurrentFile(editor.document.fileName)\n\t\t\t}\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.window.onDidChangeActiveTerminal((terminal: vscode.Terminal | undefined) => {\n\t\t\tif (terminal && recording.isRecording) {\n\t\t\t\tif (isCurrentFileExported()) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trecording.sequence++\n\t\t\t\taddToFileQueue(\n\t\t\t\t\tbuildCsvRow({\n\t\t\t\t\t\tsequence: recording.sequence,\n\t\t\t\t\t\trangeOffset: 0,\n\t\t\t\t\t\trangeLength: 0,\n\t\t\t\t\t\ttext: terminal.name,\n\t\t\t\t\t\ttype: ChangeType.TERMINAL_FOCUS,\n\t\t\t\t\t})\n\t\t\t\t)\n\t\t\t\tappendToFile()\n\t\t\t\tactionsProvider.setCurrentFile(`Terminal: ${terminal.name}`)\n\t\t\t}\n\t\t})\n\t)\n\n\tcontext.subscriptions.push(\n\t\tvscode.window.onDidStartTerminalShellExecution(async (event: vscode.TerminalShellExecutionStartEvent) => {\n\t\t\tif (recording.isRecording) {\n\t\t\t\tif (isCurrentFileExported()) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tconst commandLine = event.execution.commandLine.value\n\t\t\t\trecording.sequence++\n\t\t\t\taddToFileQueue(\n\t\t\t\t\tbuildCsvRow({\n\t\t\t\t\t\tsequence: recording.sequence,\n\t\t\t\t\t\trangeOffset: 0,\n\t\t\t\t\t\trangeLength: 0,\n\t\t\t\t\t\ttext: commandLine,\n\t\t\t\t\t\ttype: ChangeType.TERMINAL_COMMAND,\n\t\t\t\t\t})\n\t\t\t\t)\n\t\t\t\tappendToFile()\n\n\t\t\t\tconst stream = event.execution.read()\n\t\t\t\tfor await (const data 
of stream) {\n\t\t\t\t\trecording.sequence++\n\t\t\t\t\taddToFileQueue(\n\t\t\t\t\t\tbuildCsvRow({ sequence: recording.sequence, rangeOffset: 0, rangeLength: 0, text: data, type: ChangeType.TERMINAL_OUTPUT })\n\t\t\t\t\t)\n\t\t\t\t\tappendToFile()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t)\n\n\tstatusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Right, 9000)\n\tupdateStatusBarItem()\n\tcontext.subscriptions.push(statusBarItem)\n\n\t// Ensure consent is obtained when the extension is first activated\n\tawait ensureConsent()\n\n\t// Autostart recording regardless of consent. The consent only gates data upload.\n\tstartRecording().catch(err => logToOutput(`Autostart recording failed unexpectedly: ${err}`, 'error'))\n\n\t// Initialize git provider for branch checkout detection\n\tinitializeGitProvider()\n}\n\nexport function deactivate(): void {\n\tlogToOutput('Deactivating crowd-code', 'info')\n\tif (recording.isRecording) {\n\t\tstopRecording()\n\t}\n\tcleanupGitProvider()\n\tstatusBarItem.dispose()\n}\n",typescript,tab
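The extension code above is what emits the rows in these session CSVs (schema: Sequence, Time, File, RangeOffset, RangeLength, Text, Language, Type). A quick, illustrative way to inspect one recording with pandas; the file path is a placeholder:

```python
# Illustrative only: load a recorded session CSV and tally its event types.
import pandas as pd

df = pd.read_csv("source.csv")  # Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
print(df["Type"].value_counts())        # e.g. content, selection_mouse, tab, ...
print(df["File"].nunique(), "files touched")
```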
|
| 8 |
+
7,327624,"/home/franz.srambical/crowd-code/src/extension.ts",2113,0,"",typescript,selection_command
|
| 9 |
+
8,328461,"/home/franz.srambical/crowd-code/src/extension.ts",2091,81,"export async function activate(context: vscode.ExtensionContext): Promise<void> {",typescript,selection_command
|
| 10 |
+
9,767133,"/home/franz.srambical/crowd-code/src/extension.ts",0,0,"",typescript,tab
|
| 11 |
+
10,768375,"/home/franz.srambical/crowd-code/src/extension.ts",2171,0,"",typescript,selection_command
|
| 12 |
+
11,772126,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:4\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --job-name=crowd_pilot_sglang\n#SBATCH --mem=400GB\n#SBATCH --qos=normal\n\nexport HF_HOME=/fast/project/HFMI_SynergyUnit/tab_model/franz/hf_home/\n\nsource /home/franz.srambical/crowd-pilot-serializer-legacy/.venv/bin/activate\nmodule load CUDA/12.8\n\nmodel_path=""zai-org/GLM-4.5-Air""\npython3 -m sglang.launch_server --model-path $model_path --host 0.0.0.0 --log-requests \\n --tp-size 4 \\n --tool-call-parser glm45 \\n --reasoning-parser glm45 \\n --speculative-algorithm EAGLE \\n --speculative-num-steps 3 \\n --speculative-eagle-topk 1 \\n --speculative-num-draft-tokens 4 \\n --mem-fraction-static 0.9",shellscript,tab
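The SLURM script above launches an sglang server for GLM-4.5-Air. Once it is up, it can presumably be queried over sglang's OpenAI-compatible HTTP API; the host, the default port 30000, and the served model name in this sketch are assumptions, not taken from the recording:

```python
# Illustrative only: query the launched sglang server over its
# OpenAI-compatible endpoint (host/port/model name are assumptions).
import requests

resp = requests.post(
    "http://localhost:30000/v1/chat/completions",
    json={
        "model": "zai-org/GLM-4.5-Air",
        "messages": [{"role": "user", "content": "Say hello."}],
        "max_tokens": 32,
    },
    timeout=120,
)
print(resp.json()["choices"][0]["message"]["content"])
```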
|
| 13 |
+
12,772783,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",39,0,"",shellscript,selection_command
|
| 14 |
+
13,772930,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",67,0,"",shellscript,selection_command
|
| 15 |
+
14,773182,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",91,0,"",shellscript,selection_command
|
| 16 |
+
15,773204,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",117,0,"",shellscript,selection_command
|
| 17 |
+
16,773239,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",138,0,"",shellscript,selection_command
|
| 18 |
+
17,773267,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",217,0,"",shellscript,selection_command
|
| 19 |
+
18,773307,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",295,0,"",shellscript,selection_command
|
| 20 |
+
19,773333,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",333,0,"",shellscript,selection_command
|
| 21 |
+
20,773379,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",353,0,"",shellscript,selection_command
|
| 22 |
+
21,773402,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",374,0,"",shellscript,selection_command
|
| 23 |
+
22,773435,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",375,0,"",shellscript,selection_command
|
| 24 |
+
23,773469,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",446,0,"",shellscript,selection_command
|
| 25 |
+
24,773511,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",447,0,"",shellscript,selection_command
|
| 26 |
+
25,773534,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",525,0,"",shellscript,selection_command
|
| 27 |
+
26,773572,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",547,0,"",shellscript,selection_command
|
| 28 |
+
27,773607,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",548,0,"",shellscript,selection_command
|
| 29 |
+
28,773640,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",581,0,"",shellscript,selection_command
|
| 30 |
+
29,773677,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",670,0,"",shellscript,selection_command
|
| 31 |
+
30,773700,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",686,0,"",shellscript,selection_command
|
| 32 |
+
31,773739,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",716,0,"",shellscript,selection_command
|
| 33 |
+
32,773768,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",745,0,"",shellscript,selection_command
|
| 34 |
+
33,773813,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",779,0,"",shellscript,selection_command
|
| 35 |
+
34,773845,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",809,0,"",shellscript,selection_command
|
| 36 |
+
35,773874,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",840,0,"",shellscript,selection_command
|
| 37 |
+
36,774319,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",877,0,"",shellscript,selection_command
|
| 38 |
+
37,775152,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",904,0,"",shellscript,selection_command
|
| 39 |
+
38,775703,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",904,0," ",shellscript,content
|
| 40 |
+
39,775704,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",905,0,"",shellscript,selection_keyboard
|
| 41 |
+
40,775894,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",905,0,"|",shellscript,content
|
| 42 |
+
41,775894,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",906,0,"",shellscript,selection_keyboard
|
| 43 |
+
42,776438,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",905,1,"",shellscript,content
|
| 44 |
+
43,776848,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",905,0,"\",shellscript,content
|
| 45 |
+
44,776849,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",906,0,"",shellscript,selection_keyboard
|
| 46 |
+
45,777293,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",906,0,"\n ",shellscript,content
|
| 47 |
+
46,777951,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",909,0,"-",shellscript,content
|
| 48 |
+
47,777952,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",910,0,"",shellscript,selection_keyboard
|
| 49 |
+
48,778036,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",910,0,"-",shellscript,content
|
| 50 |
+
49,778037,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",911,0,"",shellscript,selection_keyboard
|
| 51 |
+
50,778344,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",911,0," ",shellscript,content
|
| 52 |
+
51,778344,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",912,0,"",shellscript,selection_keyboard
|
| 53 |
+
52,778658,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",911,1,"",shellscript,content
|
| 54 |
+
53,785062,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",911,0,"s",shellscript,content
|
| 55 |
+
54,785063,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",912,0,"",shellscript,selection_keyboard
|
| 56 |
+
55,785108,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",912,0,"e",shellscript,content
|
| 57 |
+
56,785108,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",913,0,"",shellscript,selection_keyboard
|
| 58 |
+
57,785152,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",913,0,"r",shellscript,content
|
| 59 |
+
58,785153,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",914,0,"",shellscript,selection_keyboard
|
| 60 |
+
59,785316,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",914,0,"v",shellscript,content
|
| 61 |
+
60,785316,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",915,0,"",shellscript,selection_keyboard
|
| 62 |
+
61,785457,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",915,0,"e",shellscript,content
|
| 63 |
+
62,785458,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",916,0,"",shellscript,selection_keyboard
|
| 64 |
+
63,785686,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",916,0,"d",shellscript,content
|
| 65 |
+
64,785687,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",917,0,"",shellscript,selection_keyboard
|
| 66 |
+
65,786145,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",917,0,"\n ",shellscript,content
|
| 67 |
+
66,786980,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",918,2,"",shellscript,content
|
| 68 |
+
67,787112,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",917,1,"",shellscript,content
|
| 69 |
+
68,787754,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",917,0,"-",shellscript,content
|
| 70 |
+
69,787754,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",918,0,"",shellscript,selection_keyboard
|
| 71 |
+
70,788402,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",918,0,"m",shellscript,content
|
| 72 |
+
71,788403,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",919,0,"",shellscript,selection_keyboard
|
| 73 |
+
72,788454,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",919,0,"o",shellscript,content
|
| 74 |
+
73,788454,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",920,0,"",shellscript,selection_keyboard
|
| 75 |
+
74,788562,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",920,0,"d",shellscript,content
|
| 76 |
+
75,788562,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",921,0,"",shellscript,selection_keyboard
|
| 77 |
+
76,788666,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",921,0,"e",shellscript,content
|
| 78 |
+
77,788667,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",922,0,"",shellscript,selection_keyboard
|
| 79 |
+
78,788734,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",922,0,"l",shellscript,content
|
| 80 |
+
79,788734,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",923,0,"",shellscript,selection_keyboard
|
| 81 |
+
80,789990,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",923,0,"-",shellscript,content
|
| 82 |
+
81,789990,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",924,0,"",shellscript,selection_keyboard
|
| 83 |
+
82,792224,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",907,17," --served-model-name\n",shellscript,content
|
| 84 |
+
83,792699,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",929,0,"l",shellscript,content
|
| 85 |
+
84,792699,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",930,0,"",shellscript,selection_keyboard
|
| 86 |
+
85,793211,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",929,1,"",shellscript,content
|
| 87 |
+
86,793368,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",928,1,"",shellscript,content
|
| 88 |
+
87,796891,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",907,21," --served-model-name GLM-4.5-Air\n",shellscript,content
|
| 89 |
+
88,800292,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",907,0,"",shellscript,selection_command
|
| 90 |
+
89,801072,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",909,0,"",shellscript,selection_command
|
| 91 |
+
90,801828,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",911,0,"",shellscript,selection_command
|
| 92 |
+
91,801968,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",917,0,"",shellscript,selection_command
|
| 93 |
+
92,802136,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",918,0,"",shellscript,selection_command
|
| 94 |
+
93,802310,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",923,0,"",shellscript,selection_command
|
| 95 |
+
94,802737,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",907,34," --served-model-name zai-org/GLM-4.5-Air\n",shellscript,content
|
| 96 |
+
95,803852,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",949,0,"",shellscript,selection_command
|
| 97 |
+
96,804474,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",948,1,"",shellscript,content
|
| 98 |
+
97,804488,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",909,0,"",shellscript,selection_command
|
| 99 |
+
98,815568,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --job-name=crowd_pilot_sglang\n#SBATCH --mem=400GB\n#SBATCH --qos=normal\n\nsource /home/franz.srambical/crowd-pilot-serializer-legacy/.venv/bin/activate\nmodule load CUDA/12.8\n\nmodel_path=""Qwen/Qwen3-8b""\n#model_path=""/fast/project/HFMI_SynergyUnit/tab_model/data/checkpoints/trained_checkpoints_converted_to_hf/qwen3_600m_full_finetune_crowd_code_dataset_4k_tokens_batch_size_1/2025-11-23_18-58-56/checkpoints/model_name=0--val_loss=0.30-step=949-consumed_samples=950.0""\npython3 -m sglang.launch_server --model-path $model_path --host 0.0.0.0 --log-requests",shellscript,tab
|
| 100 |
+
99,816792,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",856,0,"",shellscript,selection_mouse
|
| 101 |
+
100,816794,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",855,0,"",shellscript,selection_command
|
| 102 |
+
101,818151,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",856,0,"",shellscript,selection_command
|
| 103 |
+
102,818581,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",856,0," ",shellscript,content
|
| 104 |
+
103,818581,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",857,0,"",shellscript,selection_keyboard
|
| 105 |
+
104,819149,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",857,0,"-",shellscript,content
|
| 106 |
+
105,819150,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",858,0,"",shellscript,selection_keyboard
|
| 107 |
+
106,819272,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",858,0,"-",shellscript,content
|
| 108 |
+
107,819273,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",859,0,"",shellscript,selection_keyboard
|
| 109 |
+
108,820431,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",859,0,"s",shellscript,content
|
| 110 |
+
109,820431,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",860,0,"",shellscript,selection_keyboard
|
| 111 |
+
110,820432,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",860,0,"e",shellscript,content
|
| 112 |
+
111,820432,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",861,0,"",shellscript,selection_keyboard
|
| 113 |
+
112,820457,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",861,0,"r",shellscript,content
|
| 114 |
+
113,820458,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",862,0,"",shellscript,selection_keyboard
|
| 115 |
+
114,820644,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",862,0,"v",shellscript,content
|
| 116 |
+
115,820645,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",863,0,"",shellscript,selection_keyboard
|
| 117 |
+
116,820762,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",863,0,"e",shellscript,content
|
| 118 |
+
117,820762,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",864,0,"",shellscript,selection_keyboard
|
| 119 |
+
118,820979,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",864,0,"d",shellscript,content
|
| 120 |
+
119,820979,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",865,0,"",shellscript,selection_keyboard
|
| 121 |
+
120,822344,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",865,0,"-",shellscript,content
|
| 122 |
+
121,822344,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",866,0,"",shellscript,selection_keyboard
|
| 123 |
+
122,822596,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",866,0,"m",shellscript,content
|
| 124 |
+
123,822597,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",867,0,"",shellscript,selection_keyboard
|
| 125 |
+
124,822623,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",867,0,"o",shellscript,content
|
| 126 |
+
125,822624,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",868,0,"",shellscript,selection_keyboard
|
| 127 |
+
126,822746,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",868,0,"d",shellscript,content
|
| 128 |
+
127,822746,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",869,0,"",shellscript,selection_keyboard
|
| 129 |
+
128,822830,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",869,0,"e",shellscript,content
|
| 130 |
+
129,822830,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",870,0,"",shellscript,selection_keyboard
|
| 131 |
+
130,822942,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",870,0,"l",shellscript,content
|
| 132 |
+
131,822942,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",871,0,"",shellscript,selection_keyboard
|
| 133 |
+
132,823754,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",871,0,"-",shellscript,content
|
| 134 |
+
133,823754,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",872,0,"",shellscript,selection_keyboard
|
| 135 |
+
134,824958,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",872,0,"n",shellscript,content
|
| 136 |
+
135,824958,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",873,0,"",shellscript,selection_keyboard
|
| 137 |
+
136,825099,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",873,0,"a",shellscript,content
|
| 138 |
+
137,825099,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",874,0,"",shellscript,selection_keyboard
|
| 139 |
+
138,825157,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",874,0,"m",shellscript,content
|
| 140 |
+
139,825158,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",875,0,"",shellscript,selection_keyboard
|
| 141 |
+
140,825532,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",875,0,"e",shellscript,content
|
| 142 |
+
141,825532,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",876,0,"",shellscript,selection_keyboard
|
| 143 |
+
142,826354,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",876,0," ",shellscript,content
|
| 144 |
+
143,826355,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",877,0,"",shellscript,selection_keyboard
|
| 145 |
+
144,831431,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",877,0,"$",shellscript,content
|
| 146 |
+
145,831432,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",878,0,"",shellscript,selection_keyboard
|
| 147 |
+
146,831817,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",878,0,"m",shellscript,content
|
| 148 |
+
147,831817,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",879,0,"",shellscript,selection_keyboard
|
| 149 |
+
148,831883,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",879,0,"o",shellscript,content
|
| 150 |
+
149,831883,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",880,0,"",shellscript,selection_keyboard
|
| 151 |
+
150,832118,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",880,0,"e",shellscript,content
|
| 152 |
+
151,832118,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",881,0,"",shellscript,selection_keyboard
|
| 153 |
+
152,832734,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",880,1,"",shellscript,content
|
| 154 |
+
153,832750,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",880,0,"d",shellscript,content
|
| 155 |
+
154,832750,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",881,0,"",shellscript,selection_keyboard
|
| 156 |
+
155,832841,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",881,0,"l",shellscript,content
|
| 157 |
+
156,832841,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",882,0,"",shellscript,selection_keyboard
|
| 158 |
+
157,832842,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",882,0,"e",shellscript,content
|
| 159 |
+
158,832842,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",883,0,"",shellscript,selection_keyboard
|
| 160 |
+
159,833827,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",882,1,"",shellscript,content
|
| 161 |
+
160,834048,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",881,1,"",shellscript,content
|
| 162 |
+
161,834050,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",881,0,"e",shellscript,content
|
| 163 |
+
162,834050,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",882,0,"",shellscript,selection_keyboard
|
| 164 |
+
163,834274,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",882,0,"l",shellscript,content
|
| 165 |
+
164,834274,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",883,0,"",shellscript,selection_keyboard
|
| 166 |
+
165,834475,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",883,0,"_",shellscript,content
|
| 167 |
+
166,834476,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",884,0,"",shellscript,selection_keyboard
|
| 168 |
+
167,834751,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",884,0,"p",shellscript,content
|
| 169 |
+
168,834752,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",885,0,"",shellscript,selection_keyboard
|
| 170 |
+
169,834937,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",885,0,"a",shellscript,content
|
| 171 |
+
170,834937,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",886,0,"",shellscript,selection_keyboard
|
| 172 |
+
171,834947,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",886,0,"t",shellscript,content
|
| 173 |
+
172,834947,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",887,0,"",shellscript,selection_keyboard
|
| 174 |
+
173,835009,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",887,0,"h",shellscript,content
|
| 175 |
+
174,835009,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",888,0,"",shellscript,selection_keyboard
|
| 176 |
+
175,836052,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server.sh",887,0,"",shellscript,selection_command
|
| 177 |
+
176,843866,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"",shellscript,tab
|
| 178 |
+
177,1049314,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"",shellscript,tab
|
| 179 |
+
178,1422728,"/fast/home/franz.srambical/crowd-pilot-serializer/crates/core/src/git_pull_analysis.rs",0,0,"//! Analysis module for detecting ""out-of-sync"" data caused by git pull operations.\n//!\n//! The VS Code IDE capture extension invalidates its file cache on git checkout,\n//! but NOT on git pull. This means if a user:\n//! 1. Visits files (content cached)\n//! 2. Runs git pull (files change on disk, but cache not invalidated)\n//! 3. Edits those files\n//!\n//! The reconstructed file state will be incorrect because edits are applied\n//! to stale cached content.\n//!\n//! This module detects such sequences in captured session data.\n\nuse std::collections::{HashMap, HashSet};\nuse std::path::Path;\n\nuse serde::{Deserialize, Serialize};\n\n/// Result of analyzing a single session for git pull issues.\n#[derive(Debug, Clone, Serialize)]\npub struct SessionAnalysis {\n /// Path to the CSV file analyzed\n pub csv_path: String,\n /// All git pull events detected in this session\n pub git_pulls: Vec<GitPullEvent>,\n /// Files that were visited before any git pull\n pub files_visited_before_pull: HashSet<String>,\n /// Whether this session is potentially corrupted\n pub is_corrupted: bool,\n /// Details about the corruption if any\n pub corruption_details: Option<CorruptionDetails>,\n}\n\n/// A detected git pull event.\n#[derive(Debug, Clone, Serialize)]\npub struct GitPullEvent {\n /// Sequence number of the terminal_command event\n pub sequence: i64,\n /// The full command string\n pub command: String,\n /// Files reported as changed in the terminal output (if parseable)\n pub changed_files: Vec<String>,\n /// Whether we could parse the output\n pub output_parsed: bool,\n}\n\n/// Details about detected corruption.\n#[derive(Debug, Clone, Serialize)]\npub struct CorruptionDetails {\n /// Files that were: visited before pull, changed by pull, AND edited after pull\n pub affected_files: Vec<String>,\n /// Number of content events on affected files after the pull\n pub affected_edit_count: usize,\n}\n\n/// Overall analysis result across all sessions.\n#[derive(Debug, Clone, Serialize)]\npub struct AnalysisResult {\n /// Total number of sessions analyzed\n pub total_sessions: usize,\n /// Sessions with at least one terminal git pull\n pub sessions_with_git_pull: usize,\n /// Sessions detected as potentially corrupted\n pub corrupted_sessions: usize,\n /// Total git pull events detected\n pub total_git_pulls: usize,\n /// Git pulls where we could parse the output for changed files\n pub parseable_git_pulls: usize,\n /// Total affected files across all corrupted sessions\n pub total_affected_files: usize,\n /// Total affected edit events across all corrupted sessions\n pub total_affected_edits: usize,\n /// Percentage of sessions that are corrupted\n pub corruption_rate: f64,\n /// List of corrupted session paths for review\n pub corrupted_session_paths: Vec<String>,\n}\n\n/// A row from the CSV file (for analysis purposes).\n#[derive(Debug, Deserialize)]\n#[serde(rename_all = ""PascalCase"")]\nstruct CsvRow {\n #[serde(rename = ""Sequence"")]\n sequence: Option<i64>,\n file: String,\n text: Option<String>,\n #[serde(rename = ""Type"")]\n event_type: String,\n}\n\n/// State machine for tracking events during session analysis.\n#[derive(Debug, Default)]\nstruct AnalysisState {\n /// Files visited (TAB events) - maps file path to first visit sequence\n files_visited: HashMap<String, i64>,\n /// Detected git pull events\n git_pulls: Vec<GitPullEvent>,\n /// Pending terminal command that might be 
git pull\n pending_git_pull: Option<(i64, String)>,\n /// Buffer for terminal output following a git pull\n terminal_output_buffer: Vec<String>,\n /// Content events after each git pull - maps file path to edit count\n content_events_after_pull: HashMap<String, usize>,\n /// Sequence number of the last git pull (for tracking ""after"" events)\n last_git_pull_sequence: Option<i64>,\n /// Current sequence number\n current_sequence: i64,\n}\n\nimpl AnalysisState {\n fn new() -> Self {\n Self::default()\n }\n\n fn handle_tab_event(&mut self, file: &str, sequence: i64) {\n // Only record the first visit to each file\n self.files_visited.entry(file.to_string()).or_insert(sequence);\n }\n\n fn handle_terminal_command(&mut self, command: &str, sequence: i64) {\n // First, flush any pending git pull with its accumulated output\n self.flush_pending_git_pull();\n\n // Check if this is a git pull command\n let command_normalized = command.replace(""\\n"", ""\n"").replace(""\\r"", ""\r"").trim().to_string();\n\n if is_git_pull_command(&command_normalized) {\n self.pending_git_pull = Some((sequence, command_normalized));\n self.terminal_output_buffer.clear();\n }\n }\n\n fn handle_terminal_output(&mut self, output: &str) {\n if self.pending_git_pull.is_some() {\n let output_normalized = output.replace(""\\n"", ""\n"").replace(""\\r"", ""\r"");\n self.terminal_output_buffer.push(output_normalized);\n }\n }\n\n fn handle_content_event(&mut self, file: &str, sequence: i64) {\n // Track content events that happen after a git pull\n if let Some(pull_seq) = self.last_git_pull_sequence {\n if sequence > pull_seq {\n *self.content_events_after_pull.entry(file.to_string()).or_insert(0) += 1;\n }\n }\n self.current_sequence = sequence;\n }\n\n fn handle_git_branch_checkout(&mut self) {\n // A branch checkout invalidates the cache, so we reset tracking\n // Files visited before checkout don't matter for corruption detection\n // after this point because the cache would be cleared\n self.files_visited.clear();\n self.content_events_after_pull.clear();\n self.last_git_pull_sequence = None;\n }\n\n fn flush_pending_git_pull(&mut self) {\n if let Some((sequence, command)) = self.pending_git_pull.take() {\n let combined_output = self.terminal_output_buffer.join("""");\n let (changed_files, output_parsed) = parse_git_pull_output(&combined_output);\n\n self.git_pulls.push(GitPullEvent {\n sequence,\n command,\n changed_files,\n output_parsed,\n });\n\n self.last_git_pull_sequence = Some(sequence);\n self.terminal_output_buffer.clear();\n }\n }\n\n fn finalize(mut self) -> SessionAnalysis {\n // Flush any remaining pending git pull\n self.flush_pending_git_pull();\n\n let files_visited_before_pull: HashSet<String> = if let Some(first_pull) = self.git_pulls.first() {\n self.files_visited\n .iter()\n .filter(|(_, &seq)| seq < first_pull.sequence)\n .map(|(path, _)| path.clone())\n .collect()\n } else {\n HashSet::new()\n };\n\n // Determine corruption: files that were visited before pull AND edited after\n let mut affected_files = Vec::new();\n let mut affected_edit_count = 0;\n\n // Get all files changed by any git pull\n let files_changed_by_pulls: HashSet<String> = self.git_pulls\n .iter()\n .flat_map(|p| p.changed_files.iter().cloned())\n .collect();\n\n for (file, edit_count) in &self.content_events_after_pull {\n // File is affected if:\n // 1. It was visited before the pull (in cache)\n // 2. It was changed by the pull (or we couldn't parse output)\n // 3. 
It was edited after the pull\n let was_visited_before = files_visited_before_pull.contains(file);\n let was_changed_by_pull = files_changed_by_pulls.contains(file)\n || self.git_pulls.iter().any(|p| !p.output_parsed); // Conservative: if we can't parse, assume affected\n\n if was_visited_before && was_changed_by_pull {\n affected_files.push(file.clone());\n affected_edit_count += edit_count;\n }\n }\n\n let is_corrupted = !affected_files.is_empty();\n let corruption_details = if is_corrupted {\n Some(CorruptionDetails {\n affected_files: affected_files.clone(),\n affected_edit_count,\n })\n } else {\n None\n };\n\n SessionAnalysis {\n csv_path: String::new(), // Will be set by caller\n git_pulls: self.git_pulls,\n files_visited_before_pull,\n is_corrupted,\n corruption_details,\n }\n }\n}\n\n/// Check if a command is a git pull (or equivalent).\nfn is_git_pull_command(command: &str) -> bool {\n let cmd = command.trim().to_lowercase();\n\n // Direct git pull\n if cmd.starts_with(""git pull"") {\n return true;\n }\n\n // git fetch followed by git merge (often in a single command)\n if cmd.contains(""git fetch"") && cmd.contains(""git merge"") {\n return true;\n }\n\n // Common aliases\n if cmd == ""gpl"" || cmd == ""gl"" || cmd.starts_with(""gpl "") || cmd.starts_with(""gl "") {\n return true;\n }\n\n false\n}\n\n/// Parse git pull output to extract changed files.\n///\n/// Returns (changed_files, was_parseable).\nfn parse_git_pull_output(output: &str) -> (Vec<String>, bool) {\n let mut changed_files = Vec::new();\n let mut found_any_pattern = false;\n\n for line in output.lines() {\n let line = line.trim();\n\n // Pattern 1: ""path/to/file.rs | 10 ++-"" (diff stat format)\n if line.contains('|') && (line.contains('+') || line.contains('-')) {\n if let Some(file_part) = line.split('|').next() {\n let file = file_part.trim();\n if !file.is_empty() && !file.contains(""changed"") && !file.contains(""insertion"") {\n changed_files.push(normalize_file_path(file));\n found_any_pattern = true;\n }\n }\n }\n\n // Pattern 2: ""create mode 100644 path/to/file.rs""\n if line.starts_with(""create mode"") || line.starts_with(""delete mode"") {\n let parts: Vec<&str> = line.split_whitespace().collect();\n if parts.len() >= 4 {\n changed_files.push(normalize_file_path(parts[3]));\n found_any_pattern = true;\n }\n }\n\n // Pattern 3: ""rename path/to/old.rs => path/to/new.rs""\n if line.starts_with(""rename "") && line.contains(""=>"") {\n if let Some(arrow_pos) = line.find(""=>"") {\n let new_file = line[arrow_pos + 2..].trim();\n // Handle the {old => new} format\n if let Some(brace_end) = new_file.find('}') {\n let base = &line[7..line.find('{').unwrap_or(7)];\n let new_part = &new_file[..brace_end];\n changed_files.push(normalize_file_path(&format!(""{}{}"", base.trim(), new_part.trim())));\n } else {\n changed_files.push(normalize_file_path(new_file));\n }\n found_any_pattern = true;\n }\n }\n }\n\n // Check for ""Already up to date"" which means no changes\n if output.contains(""Already up to date"") || output.contains(""Already up-to-date"") {\n return (Vec::new(), true);\n }\n\n // Check for fast-forward indicator\n if output.contains(""Fast-forward"") && changed_files.is_empty() && !found_any_pattern {\n // Fast-forward happened but we couldn't parse files - mark as unparseable\n return (Vec::new(), false);\n }\n\n (changed_files, found_any_pattern || output.contains(""Already up to date""))\n}\n\n/// Normalize a file path for comparison.\nfn normalize_file_path(path: &str) -> String {\n // Remove 
leading ./ if present\n let path = path.strip_prefix(""./"").unwrap_or(path);\n // Remove any trailing whitespace or special chars\n path.trim().to_string()\n}\n\n/// Analyze a single CSV session file for git pull issues.\npub fn analyze_session(csv_path: &Path) -> Result<SessionAnalysis, Box<dyn std::error::Error>> {\n let mut state = AnalysisState::new();\n let mut reader = csv::Reader::from_path(csv_path)?;\n\n for result in reader.deserialize() {\n let row: CsvRow = result?;\n let sequence = row.sequence.unwrap_or(0);\n\n match row.event_type.as_str() {\n ""tab"" => {\n state.handle_tab_event(&row.file, sequence);\n }\n ""content"" => {\n state.handle_content_event(&row.file, sequence);\n }\n ""terminal_command"" => {\n if let Some(text) = &row.text {\n state.handle_terminal_command(text, sequence);\n }\n }\n ""terminal_output"" => {\n if let Some(text) = &row.text {\n state.handle_terminal_output(text);\n }\n }\n ""git_branch_checkout"" => {\n state.handle_git_branch_checkout();\n }\n _ => {}\n }\n }\n\n let mut analysis = state.finalize();\n analysis.csv_path = csv_path.to_string_lossy().to_string();\n Ok(analysis)\n}\n\n/// Analyze all sessions in a directory for git pull issues.\npub fn analyze_all_sessions(root: &Path) -> Result<AnalysisResult, Box<dyn std::error::Error>> {\n use rayon::prelude::*;\n use std::sync::atomic::{AtomicUsize, Ordering};\n use walkdir::WalkDir;\n\n // Discover CSV files\n let csv_files: Vec<_> = WalkDir::new(root)\n .into_iter()\n .filter_map(|e| e.ok())\n .filter(|e| e.path().extension().map_or(false, |ext| ext == ""csv""))\n .map(|e| e.path().to_path_buf())\n .collect();\n\n if csv_files.is_empty() {\n return Err(format!(""No CSV files found under {:?}"", root).into());\n }\n\n let total_files = csv_files.len();\n let processed_count = AtomicUsize::new(0);\n\n // Process in parallel\n let analyses: Vec<SessionAnalysis> = csv_files\n .into_par_iter()\n .filter_map(|csv_path| {\n let result = analyze_session(&csv_path);\n let count = processed_count.fetch_add(1, Ordering::Relaxed) + 1;\n\n if count % 100 == 0 || count == total_files {\n eprintln!(""Analyzed {}/{} sessions..."", count, total_files);\n }\n\n match result {\n Ok(analysis) => Some(analysis),\n Err(e) => {\n eprintln!(""Error analyzing {:?}: {}"", csv_path, e);\n None\n }\n }\n })\n .collect();\n\n // Aggregate results\n let total_sessions = analyses.len();\n let sessions_with_git_pull = analyses.iter().filter(|a| !a.git_pulls.is_empty()).count();\n let corrupted_sessions = analyses.iter().filter(|a| a.is_corrupted).count();\n let total_git_pulls: usize = analyses.iter().map(|a| a.git_pulls.len()).sum();\n let parseable_git_pulls: usize = analyses\n .iter()\n .flat_map(|a| a.git_pulls.iter())\n .filter(|p| p.output_parsed)\n .count();\n\n let total_affected_files: usize = analyses\n .iter()\n .filter_map(|a| a.corruption_details.as_ref())\n .map(|d| d.affected_files.len())\n .sum();\n\n let total_affected_edits: usize = analyses\n .iter()\n .filter_map(|a| a.corruption_details.as_ref())\n .map(|d| d.affected_edit_count)\n .sum();\n\n let corrupted_session_paths: Vec<String> = analyses\n .iter()\n .filter(|a| a.is_corrupted)\n .map(|a| a.csv_path.clone())\n .collect();\n\n let corruption_rate = if total_sessions > 0 {\n corrupted_sessions as f64 / total_sessions as f64\n } else {\n 0.0\n };\n\n Ok(AnalysisResult {\n total_sessions,\n sessions_with_git_pull,\n corrupted_sessions,\n total_git_pulls,\n parseable_git_pulls,\n total_affected_files,\n total_affected_edits,\n corruption_rate,\n 
corrupted_session_paths,\n })\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_is_git_pull_command() {\n assert!(is_git_pull_command(""git pull""));\n assert!(is_git_pull_command(""git pull origin main""));\n assert!(is_git_pull_command(""git pull --rebase""));\n assert!(is_git_pull_command("" git pull ""));\n assert!(is_git_pull_command(""GIT PULL""));\n\n assert!(!is_git_pull_command(""git push""));\n assert!(!is_git_pull_command(""git status""));\n assert!(!is_git_pull_command(""echo git pull""));\n }\n\n #[test]\n fn test_parse_git_pull_output_already_up_to_date() {\n let output = ""Already up to date.\n"";\n let (files, parsed) = parse_git_pull_output(output);\n assert!(parsed);\n assert!(files.is_empty());\n }\n\n #[test]\n fn test_parse_git_pull_output_with_changes() {\n let output = r#""\nUpdating abc123..def456\nFast-forward\n src/main.rs | 10 +++++-----\n src/lib.rs | 5 +++++\n 2 files changed, 10 insertions(+), 5 deletions(-)\n""#;\n let (files, parsed) = parse_git_pull_output(output);\n assert!(parsed);\n assert!(files.contains(&""src/main.rs"".to_string()));\n assert!(files.contains(&""src/lib.rs"".to_string()));\n }\n\n #[test]\n fn test_parse_git_pull_output_with_new_file() {\n let output = r#""\nUpdating abc123..def456\nFast-forward\n src/new_file.rs | 20 ++++++++++++++++++++\n create mode 100644 src/new_file.rs\n 1 file changed, 20 insertions(+)\n""#;\n let (files, parsed) = parse_git_pull_output(output);\n assert!(parsed);\n assert!(files.contains(&""src/new_file.rs"".to_string()));\n }\n}\n",rust,tab
|
| 180 |
+
179,1639548,"/fast/home/franz.srambical/crowd-pilot-serializer/crates/core/src/lib.rs",0,0,"//! Core serialization logic for crowd-pilot IDE interaction data.\n//!\n//! This crate provides the `ConversationStateManager` which converts IDE events\n//! (tab switches, edits, terminal commands, etc.) into conversation format\n//! suitable for training language models.\n\n/// Trait for tokenization operations.\n/// \n/// Implementors provide token counting and truncation capabilities.\n/// For exact tokenization (preprocessing), use a real tokenizer.\n/// For approximate tokenization (runtime), use character-based estimation.\npub trait Tokenizer {\n /// Count the number of tokens in the given text.\n fn count_tokens(&self, text: &str) -> usize;\n \n /// Truncate text to at most `max_tokens` tokens.\n /// Returns the truncated text.\n fn truncate_to_max_tokens(&self, text: &str, max_tokens: usize) -> String;\n}\n\n// Blanket implementation for references to Tokenizers\nimpl<T: Tokenizer + ?Sized> Tokenizer for &T {\n fn count_tokens(&self, text: &str) -> usize {\n (*self).count_tokens(text)\n }\n \n fn truncate_to_max_tokens(&self, text: &str, max_tokens: usize) -> String {\n (*self).truncate_to_max_tokens(text, max_tokens)\n }\n}\n\nmod conversation;\nmod diff;\npub mod git_pull_analysis;\nmod helpers;\npub mod pipeline;\n\npub use conversation::{ConversationMessage, ConversationStateManager, ConversationStateManagerConfig, FinalizedConversation, Role};\npub use pipeline::{\n discover_csv_files, process_all_sessions, process_session, write_jsonl_output,\n MilesMessage, MilesRecord, PipelineConfig, PipelineResult, SessionResult,\n};\npub use diff::{compute_changed_block_lines, ChangedBlock};\npub use helpers::{\n apply_backspaces, apply_change, clean_text, escape_single_quotes_for_sed, fenced_block,\n line_numbered_output, normalize_terminal_output, serialize_compute_viewport, Viewport,\n};\n\n/// Default viewport radius (lines above/below cursor to show)\npub const VIEWPORT_RADIUS: usize = 10;\n\n/// Default coalesce radius for grouping nearby edits\npub const COALESCE_RADIUS: usize = 5;\n\n/// Default maximum tokens per message (approximate)\npub const MAX_TOKENS_PER_MESSAGE: usize = 2048;\n\n/// Default maximum tokens per terminal output\npub const MAX_TOKENS_PER_TERMINAL_OUTPUT: usize = 256;\n\n/// Generate the default system prompt with the given viewport radius.\npub fn default_system_prompt(viewport_radius: usize) -> String {\n let viewport_lines = 2 * viewport_radius + 1;\n format!(\n r#""You are a helpful assistant that interacts with a computer shell to solve programming tasks.\nYour goal is to predict the next bash command a developer would most likely execute, given their editing and navigation history.\n\n=== CONVERSATION FORMAT ===\nThe conversation history alternates between:\n- Assistant messages: bash commands in fenced code blocks\n- User messages: command output wrapped in <stdout>...</stdout> tags\n\nAfter each edit, you should show the resulting file contents using `cat -n FILE | sed -n 'START,ENDp'`, which produces 6-character right-aligned line numbers followed by a tab, e.g.:\n 1\tfirst line\n 2\tsecond line\n\nThe chained cat command should show {viewport_lines} lines around the edited region.\n\n=== RESPONSE FORMAT ===\nYour response must contain exactly ONE bash code block with one command or two commands connected with &&.\n\n<format_example>\n```bash\nyour_command_here\n```\n</format_example>\n\nFailure to follow these rules will cause your response to be 
rejected.\n\n=== EDIT COMMAND FORMAT (IMPORTANT) ===\nWhen you want to EDIT a file, you MUST encode the edit using line-based sed commands in ONE of the following forms, and you MUST NOT use substitution commands like ""Ns/old/new/g"".\n\nAssume all line numbers are 1-based and paths are absolute.\nAllowed edit encodings (choose exactly one per response):\n\n1) Replace a contiguous block of lines:\n sed -i 'START,ENDc\\nNEW_LINE_1\\nNEW_LINE_2\\n...\n' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'\n\n2) Delete a contiguous block of lines:\n sed -i 'START,ENDd' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'\n\n3) Insert new lines BEFORE a given line:\n sed -i 'STARTi\\nNEW_LINE_1\\nNEW_LINE_2\\n...\n' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'\n\n4) Append new lines at the END of the file:\n sed -i '$a\\nNEW_LINE_1\\nNEW_LINE_2\\n...\n' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'\n\nWhere VSTART and VEND specify a small viewport around the edited region.\n\nDo NOT emit commands like ""3s/print/print()/g"" or any other ""s/old/new/"" style sed substitution; instead, always rewrite the affected lines using one of the line-based forms above.\n\nWhen you are NOT editing files (e.g., running tests, git commands, tools, etc.), you may emit arbitrary bash commands.""#\n )\n}\n\n",rust,tab
|
| 181 |
+
180,1639549,"/fast/home/franz.srambical/crowd-pilot-serializer/crates/core/src/lib.rs",1201,0,"",rust,selection_command
|
| 182 |
+
181,1641394,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"",shellscript,tab
|
| 183 |
+
182,1674620,"crates/core/src/git_pull_analysis.rs",0,0,"//! Analysis module for detecting ""out-of-sync"" data caused by git pull operations.\n//!\n//! The VS Code IDE capture extension invalidates its file cache on git checkout,\n//! but NOT on git pull. This means if a user:\n//! 1. Visits files (content cached)\n//! 2. Runs git pull (files change on disk, but cache not invalidated)\n//! 3. Edits those files\n//!\n//! The reconstructed file state will be incorrect because edits are applied\n//! to stale cached content.\n//!\n//! This module detects such sequences in captured session data.\n\nuse std::collections::{HashMap, HashSet};\nuse std::path::Path;\n\nuse serde::{Deserialize, Serialize};\n\n/// Result of analyzing a single session for git pull issues.\n#[derive(Debug, Clone, Serialize)]\npub struct SessionAnalysis {\n /// Path to the CSV file analyzed\n pub csv_path: String,\n /// All git pull events detected in this session\n pub git_pulls: Vec<GitPullEvent>,\n /// Files that were visited before any git pull\n pub files_visited_before_pull: HashSet<String>,\n /// Whether this session is potentially corrupted\n pub is_corrupted: bool,\n /// Details about the corruption if any\n pub corruption_details: Option<CorruptionDetails>,\n}\n\n/// A detected git pull event.\n#[derive(Debug, Clone, Serialize)]\npub struct GitPullEvent {\n /// Sequence number of the terminal_command event\n pub sequence: i64,\n /// The full command string\n pub command: String,\n /// Files reported as changed in the terminal output (if parseable)\n pub changed_files: Vec<String>,\n /// Whether we could parse the output\n pub output_parsed: bool,\n}\n\n/// Details about detected corruption.\n#[derive(Debug, Clone, Serialize)]\npub struct CorruptionDetails {\n /// Files that were: visited before pull, changed by pull, AND edited after pull\n pub affected_files: Vec<String>,\n /// Number of content events on affected files after the pull\n pub affected_edit_count: usize,\n}\n\n/// Overall analysis result across all sessions.\n#[derive(Debug, Clone, Serialize)]\npub struct AnalysisResult {\n /// Total number of sessions analyzed\n pub total_sessions: usize,\n /// Sessions with at least one terminal git pull\n pub sessions_with_git_pull: usize,\n /// Sessions detected as potentially corrupted\n pub corrupted_sessions: usize,\n /// Total git pull events detected\n pub total_git_pulls: usize,\n /// Git pulls where we could parse the output for changed files\n pub parseable_git_pulls: usize,\n /// Total affected files across all corrupted sessions\n pub total_affected_files: usize,\n /// Total affected edit events across all corrupted sessions\n pub total_affected_edits: usize,\n /// Percentage of sessions that are corrupted\n pub corruption_rate: f64,\n /// List of corrupted session paths for review\n pub corrupted_session_paths: Vec<String>,\n}\n\n/// A row from the CSV file (for analysis purposes).\n#[derive(Debug, Deserialize)]\n#[serde(rename_all = ""PascalCase"")]\nstruct CsvRow {\n #[serde(rename = ""Sequence"")]\n sequence: Option<i64>,\n file: String,\n text: Option<String>,\n #[serde(rename = ""Type"")]\n event_type: String,\n}\n\n/// State machine for tracking events during session analysis.\n#[derive(Debug, Default)]\nstruct AnalysisState {\n /// Files visited (TAB events) - maps file path to first visit sequence\n files_visited: HashMap<String, i64>,\n /// Detected git pull events\n git_pulls: Vec<GitPullEvent>,\n /// Pending terminal command that might be git pull\n pending_git_pull: Option<(i64, 
String)>,\n /// Buffer for terminal output following a git pull\n terminal_output_buffer: Vec<String>,\n /// Content events after each git pull - maps file path to edit count\n content_events_after_pull: HashMap<String, usize>,\n /// Sequence number of the last git pull (for tracking ""after"" events)\n last_git_pull_sequence: Option<i64>,\n /// Current sequence number\n current_sequence: i64,\n}\n\nimpl AnalysisState {\n fn new() -> Self {\n Self::default()\n }\n\n fn handle_tab_event(&mut self, file: &str, sequence: i64) {\n // Only record the first visit to each file\n self.files_visited.entry(file.to_string()).or_insert(sequence);\n }\n\n fn handle_terminal_command(&mut self, command: &str, sequence: i64) {\n // First, flush any pending git pull with its accumulated output\n self.flush_pending_git_pull();\n\n // Check if this is a git pull command\n let command_normalized = command.replace(""\\n"", ""\n"").replace(""\\r"", ""\r"").trim().to_string();\n\n if is_git_pull_command(&command_normalized) {\n self.pending_git_pull = Some((sequence, command_normalized));\n self.terminal_output_buffer.clear();\n }\n }\n\n fn handle_terminal_output(&mut self, output: &str) {\n if self.pending_git_pull.is_some() {\n let output_normalized = output.replace(""\\n"", ""\n"").replace(""\\r"", ""\r"");\n self.terminal_output_buffer.push(output_normalized);\n }\n }\n\n fn handle_content_event(&mut self, file: &str, sequence: i64) {\n // Track content events that happen after a git pull\n if let Some(pull_seq) = self.last_git_pull_sequence {\n if sequence > pull_seq {\n *self.content_events_after_pull.entry(file.to_string()).or_insert(0) += 1;\n }\n }\n self.current_sequence = sequence;\n }\n\n fn handle_git_branch_checkout(&mut self) {\n // A branch checkout invalidates the cache, so we reset tracking\n // Files visited before checkout don't matter for corruption detection\n // after this point because the cache would be cleared\n self.files_visited.clear();\n self.content_events_after_pull.clear();\n self.last_git_pull_sequence = None;\n }\n\n fn flush_pending_git_pull(&mut self) {\n if let Some((sequence, command)) = self.pending_git_pull.take() {\n let combined_output = self.terminal_output_buffer.join("""");\n let (changed_files, output_parsed) = parse_git_pull_output(&combined_output);\n\n self.git_pulls.push(GitPullEvent {\n sequence,\n command,\n changed_files,\n output_parsed,\n });\n\n self.last_git_pull_sequence = Some(sequence);\n self.terminal_output_buffer.clear();\n }\n }\n\n fn finalize(mut self) -> SessionAnalysis {\n // Flush any remaining pending git pull\n self.flush_pending_git_pull();\n\n let files_visited_before_pull: HashSet<String> = if let Some(first_pull) = self.git_pulls.first() {\n self.files_visited\n .iter()\n .filter(|(_, &seq)| seq < first_pull.sequence)\n .map(|(path, _)| path.clone())\n .collect()\n } else {\n HashSet::new()\n };\n\n // Determine corruption: files that were visited before pull AND edited after\n let mut affected_files = Vec::new();\n let mut affected_edit_count = 0;\n\n // Get all files changed by any git pull\n let files_changed_by_pulls: HashSet<String> = self.git_pulls\n .iter()\n .flat_map(|p| p.changed_files.iter().cloned())\n .collect();\n\n for (file, edit_count) in &self.content_events_after_pull {\n // File is affected if:\n // 1. It was visited before the pull (in cache)\n // 2. It was changed by the pull (or we couldn't parse output)\n // 3. 
It was edited after the pull\n let was_visited_before = files_visited_before_pull.contains(file);\n let was_changed_by_pull = files_changed_by_pulls.contains(file)\n || self.git_pulls.iter().any(|p| !p.output_parsed); // Conservative: if we can't parse, assume affected\n\n if was_visited_before && was_changed_by_pull {\n affected_files.push(file.clone());\n affected_edit_count += edit_count;\n }\n }\n\n let is_corrupted = !affected_files.is_empty();\n let corruption_details = if is_corrupted {\n Some(CorruptionDetails {\n affected_files: affected_files.clone(),\n affected_edit_count,\n })\n } else {\n None\n };\n\n SessionAnalysis {\n csv_path: String::new(), // Will be set by caller\n git_pulls: self.git_pulls,\n files_visited_before_pull,\n is_corrupted,\n corruption_details,\n }\n }\n}\n\n/// Check if a command is a git pull (or equivalent).\nfn is_git_pull_command(command: &str) -> bool {\n let cmd = command.trim().to_lowercase();\n\n // Direct git pull\n if cmd.starts_with(""git pull"") {\n return true;\n }\n\n // git fetch followed by git merge (often in a single command)\n if cmd.contains(""git fetch"") && cmd.contains(""git merge"") {\n return true;\n }\n\n // Common aliases\n if cmd == ""gpl"" || cmd == ""gl"" || cmd.starts_with(""gpl "") || cmd.starts_with(""gl "") {\n return true;\n }\n\n false\n}\n\n/// Parse git pull output to extract changed files.\n///\n/// Returns (changed_files, was_parseable).\nfn parse_git_pull_output(output: &str) -> (Vec<String>, bool) {\n let mut changed_files = Vec::new();\n let mut found_any_pattern = false;\n\n for line in output.lines() {\n let line = line.trim();\n\n // Pattern 1: ""path/to/file.rs | 10 ++-"" (diff stat format)\n if line.contains('|') && (line.contains('+') || line.contains('-')) {\n if let Some(file_part) = line.split('|').next() {\n let file = file_part.trim();\n if !file.is_empty() && !file.contains(""changed"") && !file.contains(""insertion"") {\n changed_files.push(normalize_file_path(file));\n found_any_pattern = true;\n }\n }\n }\n\n // Pattern 2: ""create mode 100644 path/to/file.rs""\n if line.starts_with(""create mode"") || line.starts_with(""delete mode"") {\n let parts: Vec<&str> = line.split_whitespace().collect();\n if parts.len() >= 4 {\n changed_files.push(normalize_file_path(parts[3]));\n found_any_pattern = true;\n }\n }\n\n // Pattern 3: ""rename path/to/old.rs => path/to/new.rs""\n if line.starts_with(""rename "") && line.contains(""=>"") {\n if let Some(arrow_pos) = line.find(""=>"") {\n let new_file = line[arrow_pos + 2..].trim();\n // Handle the {old => new} format\n if let Some(brace_end) = new_file.find('}') {\n let base = &line[7..line.find('{').unwrap_or(7)];\n let new_part = &new_file[..brace_end];\n changed_files.push(normalize_file_path(&format!(""{}{}"", base.trim(), new_part.trim())));\n } else {\n changed_files.push(normalize_file_path(new_file));\n }\n found_any_pattern = true;\n }\n }\n }\n\n // Check for ""Already up to date"" which means no changes\n if output.contains(""Already up to date"") || output.contains(""Already up-to-date"") {\n return (Vec::new(), true);\n }\n\n // Check for fast-forward indicator\n if output.contains(""Fast-forward"") && changed_files.is_empty() && !found_any_pattern {\n // Fast-forward happened but we couldn't parse files - mark as unparseable\n return (Vec::new(), false);\n }\n\n (changed_files, found_any_pattern || output.contains(""Already up to date""))\n}\n\n/// Normalize a file path for comparison.\nfn normalize_file_path(path: &str) -> String {\n // Remove 
leading ./ if present\n let path = path.strip_prefix(""./"").unwrap_or(path);\n // Remove any trailing whitespace or special chars\n path.trim().to_string()\n}\n\n/// Analyze a single CSV session file for git pull issues.\npub fn analyze_session(csv_path: &Path) -> Result<SessionAnalysis, Box<dyn std::error::Error>> {\n let mut state = AnalysisState::new();\n let mut reader = csv::Reader::from_path(csv_path)?;\n\n for result in reader.deserialize() {\n let row: CsvRow = result?;\n let sequence = row.sequence.unwrap_or(0);\n\n match row.event_type.as_str() {\n ""tab"" => {\n state.handle_tab_event(&row.file, sequence);\n }\n ""content"" => {\n state.handle_content_event(&row.file, sequence);\n }\n ""terminal_command"" => {\n if let Some(text) = &row.text {\n state.handle_terminal_command(text, sequence);\n }\n }\n ""terminal_output"" => {\n if let Some(text) = &row.text {\n state.handle_terminal_output(text);\n }\n }\n ""git_branch_checkout"" => {\n state.handle_git_branch_checkout();\n }\n _ => {}\n }\n }\n\n let mut analysis = state.finalize();\n analysis.csv_path = csv_path.to_string_lossy().to_string();\n Ok(analysis)\n}\n\n/// Analyze all sessions in a directory for git pull issues.\npub fn analyze_all_sessions(root: &Path) -> Result<AnalysisResult, Box<dyn std::error::Error>> {\n use rayon::prelude::*;\n use std::sync::atomic::{AtomicUsize, Ordering};\n use walkdir::WalkDir;\n\n // Discover CSV files\n let csv_files: Vec<_> = WalkDir::new(root)\n .into_iter()\n .filter_map(|e| e.ok())\n .filter(|e| e.path().extension().map_or(false, |ext| ext == ""csv""))\n .map(|e| e.path().to_path_buf())\n .collect();\n\n if csv_files.is_empty() {\n return Err(format!(""No CSV files found under {:?}"", root).into());\n }\n\n let total_files = csv_files.len();\n let processed_count = AtomicUsize::new(0);\n\n // Process in parallel\n let analyses: Vec<SessionAnalysis> = csv_files\n .into_par_iter()\n .filter_map(|csv_path| {\n let result = analyze_session(&csv_path);\n let count = processed_count.fetch_add(1, Ordering::Relaxed) + 1;\n\n if count % 100 == 0 || count == total_files {\n eprintln!(""Analyzed {}/{} sessions..."", count, total_files);\n }\n\n match result {\n Ok(analysis) => Some(analysis),\n Err(e) => {\n eprintln!(""Error analyzing {:?}: {}"", csv_path, e);\n None\n }\n }\n })\n .collect();\n\n // Aggregate results\n let total_sessions = analyses.len();\n let sessions_with_git_pull = analyses.iter().filter(|a| !a.git_pulls.is_empty()).count();\n let corrupted_sessions = analyses.iter().filter(|a| a.is_corrupted).count();\n let total_git_pulls: usize = analyses.iter().map(|a| a.git_pulls.len()).sum();\n let parseable_git_pulls: usize = analyses\n .iter()\n .flat_map(|a| a.git_pulls.iter())\n .filter(|p| p.output_parsed)\n .count();\n\n let total_affected_files: usize = analyses\n .iter()\n .filter_map(|a| a.corruption_details.as_ref())\n .map(|d| d.affected_files.len())\n .sum();\n\n let total_affected_edits: usize = analyses\n .iter()\n .filter_map(|a| a.corruption_details.as_ref())\n .map(|d| d.affected_edit_count)\n .sum();\n\n let corrupted_session_paths: Vec<String> = analyses\n .iter()\n .filter(|a| a.is_corrupted)\n .map(|a| a.csv_path.clone())\n .collect();\n\n let corruption_rate = if total_sessions > 0 {\n corrupted_sessions as f64 / total_sessions as f64\n } else {\n 0.0\n };\n\n Ok(AnalysisResult {\n total_sessions,\n sessions_with_git_pull,\n corrupted_sessions,\n total_git_pulls,\n parseable_git_pulls,\n total_affected_files,\n total_affected_edits,\n corruption_rate,\n 
corrupted_session_paths,\n })\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn test_is_git_pull_command() {\n assert!(is_git_pull_command(""git pull""));\n assert!(is_git_pull_command(""git pull origin main""));\n assert!(is_git_pull_command(""git pull --rebase""));\n assert!(is_git_pull_command("" git pull ""));\n assert!(is_git_pull_command(""GIT PULL""));\n\n assert!(!is_git_pull_command(""git push""));\n assert!(!is_git_pull_command(""git status""));\n assert!(!is_git_pull_command(""echo git pull""));\n }\n\n #[test]\n fn test_parse_git_pull_output_already_up_to_date() {\n let output = ""Already up to date.\n"";\n let (files, parsed) = parse_git_pull_output(output);\n assert!(parsed);\n assert!(files.is_empty());\n }\n\n #[test]\n fn test_parse_git_pull_output_with_changes() {\n let output = r#""\nUpdating abc123..def456\nFast-forward\n src/main.rs | 10 +++++-----\n src/lib.rs | 5 +++++\n 2 files changed, 10 insertions(+), 5 deletions(-)\n""#;\n let (files, parsed) = parse_git_pull_output(output);\n assert!(parsed);\n assert!(files.contains(&""src/main.rs"".to_string()));\n assert!(files.contains(&""src/lib.rs"".to_string()));\n }\n\n #[test]\n fn test_parse_git_pull_output_with_new_file() {\n let output = r#""\nUpdating abc123..def456\nFast-forward\n src/new_file.rs | 20 ++++++++++++++++++++\n create mode 100644 src/new_file.rs\n 1 file changed, 20 insertions(+)\n""#;\n let (files, parsed) = parse_git_pull_output(output);\n assert!(parsed);\n assert!(files.contains(&""src/new_file.rs"".to_string()));\n }\n}\n",rust,tab
|
| 184 |
+
183,1702989,"crates/core/src/git_pull_analysis.rs",84,0,"",rust,selection_command
|
| 185 |
+
184,1703152,"crates/core/src/git_pull_analysis.rs",88,0,"",rust,selection_command
|
| 186 |
+
185,1726764,"crates/core/src/git_pull_analysis.rs",1836,0,"",rust,selection_keyboard
|
| 187 |
+
186,1738810,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 188 |
+
187,1739333,"crates/core/src/git_pull_analysis.rs",3607,0,"",rust,selection_keyboard
|
| 189 |
+
188,1753387,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 190 |
+
189,1776334,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 191 |
+
190,1776932,"crates/core/src/git_pull_analysis.rs",5419,0,"",rust,selection_keyboard
|
| 192 |
+
191,1800599,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 193 |
+
192,1801572,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 194 |
+
193,1803186,"TERMINAL",0,0,"",,terminal_focus
|
| 195 |
+
194,1803186,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 196 |
+
195,1828682,"TERMINAL",0,0,"ls /fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/",,terminal_command
|
| 197 |
+
196,1828686,"TERMINAL",0,0,"]633;C[0m[01;34m05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1[0m [01;34m4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328[0m [01;34me8b08c312d88206805b92191af1ee2a660f8f0e59d3990233d6a3f81cdab43f4[0m\r\n[01;34m1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d[0m [01;34m69a563db57051868fc3ecdda3a43f162385be48f5447fe691a10177ee4dc3a0e[0m README.md\r\n[01;34m1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389[0m [01;34m927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403[0m\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-serializer",,terminal_output
|
| 198 |
+
197,1846784,"/temp/readonly/Bash tool output (hewi0f)",0,0,"Analyzing sessions for git pull corruption...\nCSV root: ""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/""\n\nAnalyzed 100/349 sessions...\nAnalyzed 200/349 sessions...\nAnalyzed 300/349 sessions...\nAnalyzed 349/349 sessions...\n\n╔══════════════════════════════════════════════════════════════╗\n║ GIT PULL CORRUPTION ANALYSIS RESULTS ║\n╠══════════════════════════════════════════════════════════════╣\n║ Total sessions analyzed: 349 ║\n║ Sessions with terminal git pull: 22 ║\n║ Corrupted sessions (detected): 1 ║\n╠══════════════════════════════════════════════════════════════╣\n║ Total git pull commands: 40 ║\n║ Git pulls with parseable output: 33 ║\n╠══════════════════════════════════════════════════════════════╣\n║ Total affected files: 1 ║\n║ Total affected edit events: 1 ║\n╠══════════════════════════════════════════════════════════════╣\n║ CORRUPTION RATE (lower bound): 0.29% ║\n╚══════════════════════════════════════════════════════════════╝\n\n[note] This analysis only detects terminal-based git pulls.\n VS Code UI git pulls (~33% of users) are undetectable.\n Actual corruption rate may be ~1.5x higher.",plaintext,tab
|
| 199 |
+
198,2303557,"/temp/readonly/command (zkiuxu)",0,0,"./target/release/crowd-pilot-serialize analyze-git-pull --csv-root /fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/ --verbose 2>&1 | tail -20",plaintext,tab
|
| 200 |
+
199,2305581,"/temp/readonly/command (zkiuxu)",0,151,"./target/release/crowd-pilot-serialize analyze-git-pull --csv-root /fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/ --verbose 2>&1 | tail -20",plaintext,selection_command
|
| 201 |
+
200,2305947,"/temp/readonly/command (zkiuxu)",0,0,"",plaintext,selection_command
|
| 202 |
+
201,2306731,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 203 |
+
202,2341517,"/home/franz.srambical/crowd-code/src/extension.ts",0,0,"",typescript,tab
|
| 204 |
+
203,2343747,"/home/franz.srambical/crowd-code/src/extension.ts",2091,81,"export async function activate(context: vscode.ExtensionContext): Promise<void> {",typescript,selection_command
|
| 205 |
+
204,2412027,"/temp/readonly/Bash tool output (bhdhua)",0,0,"╠══════════════════════════════════════════════════════════════╣\n║ Total sessions analyzed: 349 ║\n║ Sessions with terminal git pull: 22 ║\n║ Corrupted sessions (detected): 1 ║\n╠══════════════════════════════════════════════════════════════╣\n║ Total git pull commands: 40 ║\n║ Git pulls with parseable output: 33 ║\n╠══════════════════════════════════════════════════════════════╣\n║ Total affected files: 1 ║\n║ Total affected edit events: 1 ║\n╠══════════════════════════════════════════════════════════════╣\n║ CORRUPTION RATE (lower bound): 0.29% ║\n╚══════════════════════════════════════════════════════════════╝\n\n[note] This analysis only detects terminal-based git pulls.\n VS Code UI git pulls (~33% of users) are undetectable.\n Actual corruption rate may be ~1.5x higher.\n\nCorrupted sessions:\n - /fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-579e9536-d5ab-483e-9ba4-40888138347a1751043147165-2025_06_27-18.53.18.94/source.csv",plaintext,tab
|
| 206 |
+
205,3170241,"/fast/home/franz.srambical/crowd-pilot-serializer/crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 207 |
+
206,3170242,"/fast/home/franz.srambical/crowd-pilot-serializer/crates/core/src/git_pull_analysis.rs",1198,0,"",rust,selection_command
|
| 208 |
+
207,3174232,"/temp/readonly/Bash tool output (bhdhua)",0,0,"",plaintext,tab
|
| 209 |
+
208,3304059,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
| 210 |
+
209,3306525,"crates/core/src/git_pull_analysis.rs",4960,0,"",rust,selection_mouse
|
| 211 |
+
210,3306527,"crates/core/src/git_pull_analysis.rs",4959,0,"",rust,selection_command
|
| 212 |
+
211,3309698,"crates/core/src/git_pull_analysis.rs",726,108,"",rust,content
|
| 213 |
+
212,3309716,"crates/core/src/git_pull_analysis.rs",527,97,"",rust,content
|
| 214 |
+
213,3309719,"crates/core/src/git_pull_analysis.rs",4863,0,"",rust,selection_command
|
| 215 |
+
214,3310959,"crates/core/src/git_pull_analysis.rs",527,0,", and provides\n//! token-aware analysis that counts clean vs corrupted tokens after serialization",rust,content
|
| 216 |
+
215,3310968,"crates/core/src/git_pull_analysis.rs",726,0,";\n\nuse crate::conversation::{ConversationStateManager, ConversationStateManagerConfig};\nuse crate::Tokenizer",rust,content
|
| 217 |
+
216,3310969,"crates/core/src/git_pull_analysis.rs",4959,0,"",rust,selection_command
|
| 218 |
+
217,3421536,"crates/core/src/git_pull_analysis.rs",0,0,"",rust,tab
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3e3ce02e-664a-4f58-9d7f-0f56e32c7def1753363875204-2025_07_24-15.31.23.202/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4001b8e0-0e9c-4560-958e-a52f816eab081767861210539-2026_01_08-09.33.45.304/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-400c1369-388a-4663-9475-37c30815fb401752229095376-2025_07_11-12.18.28.454/source.csv
ADDED
|
@@ -0,0 +1,233 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"experiments/tokenizer_optimal_batch_size.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\ndata_dir=""$PWD/data_arrayrecord/dummy""\nckpt_dir=""$PWD/checkpoints/tokenizer_openai_grain_checkpointing""\n\nexport XLA_FLAGS=--xla_gpu_autotune_level=0\nsrun python train_tokenizer.py \\n --batch_size 12 \\n --ckpt_dir $ckpt_dir \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
|
| 3 |
+
2,862,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:18:28 PM [info] Activating crowd-code\n12:18:28 PM [info] Recording started\n12:18:28 PM [info] Initializing git provider using file system watchers...\n12:18:29 PM [info] Git repository found\n12:18:29 PM [info] Git provider initialized successfully\n12:18:29 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 4 |
+
3,2102,"experiments/tokenizer_optimal_batch_size.sh",0,0,"",shellscript,tab
|
| 5 |
+
4,8512,"TERMINAL",0,0,"squeue",,terminal_command
|
| 6 |
+
5,8563,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;squeue;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25h",,terminal_output
|
| 7 |
+
6,25189,"TERMINAL",0,0,"salloc --gpus=4 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G",,terminal_command
|
| 8 |
+
7,25260,"TERMINAL",0,0,"[?25l[1;29H\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;Bsalloc --gpus=4 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G[A\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;B\r\n\r\r\n[?2004l\r]633;E;salloc --gpus=4 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Granted job allocation 13338\r\n",,terminal_output
|
| 9 |
+
8,25684,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\n",,terminal_output
|
| 10 |
+
9,25771,"TERMINAL",0,0,"Running inside SLURM, Job ID 13338.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 11 |
+
10,27256,"TERMINAL",0,0,"exit",,terminal_output
|
| 12 |
+
11,28213,"TERMINAL",0,0,"[?25l[6;55He[?25h",,terminal_output
|
| 13 |
+
12,29203,"TERMINAL",0,0,"\r\n[?2004l\rbash: exite: command not found\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 14 |
+
13,30079,"TERMINAL",0,0,"exite[K",,terminal_output
|
| 15 |
+
14,30263,"TERMINAL",0,0,"e",,terminal_output
|
| 16 |
+
15,30475,"TERMINAL",0,0,"[?25l[8;52Hx[8;54H[?25h",,terminal_output
|
| 17 |
+
16,30561,"TERMINAL",0,0,"[?25l[8;53Hi[8;55H[?25h",,terminal_output
|
| 18 |
+
17,30724,"TERMINAL",0,0,"[?25l[8;54Ht[9;1H[?25h",,terminal_output
|
| 19 |
+
18,30970,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\n[?25hsrun: error: hai002: task 0: Exited with exit code 127\r\nsalloc: Relinquishing job allocation 13338\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;127]633;P;Cwd=/home/franz.srambical/jafar",,terminal_output
|
| 20 |
+
19,45408,"TERMINAL",0,0,"salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G",,terminal_command
|
| 21 |
+
20,45451,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Granted job allocation 13339\r\n",,terminal_output
|
| 22 |
+
21,45818,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\nRunning inside SLURM, Job ID 13339.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 23 |
+
22,47437,"TERMINAL",0,0,"[?25le[2mx[22m[2mi[22m[17;54H[?25h[?25l[17;52Hx[17;54H[?25h[?25l[17;53Hi[17;54H[?25h",,terminal_output
|
| 24 |
+
23,47884,"TERMINAL",0,0,"[?25l[17;54Ht[18;1H[?25h",,terminal_output
|
| 25 |
+
24,47931,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\nsalloc: Relinquishing job allocation 13339\r\n[?25h",,terminal_output
|
| 26 |
+
25,58411,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=1 --mem=100G",,terminal_command
|
| 27 |
+
26,58424,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;Csalloc: Granted job allocation 13340\r\n",,terminal_output
|
| 28 |
+
27,58433,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\n",,terminal_output
|
| 29 |
+
28,58572,"TERMINAL",0,0,"Running inside SLURM, Job ID 13340.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 30 |
+
29,61650,"TERMINAL",0,0,"[?25ls[2mr[22m[2mu[22m[25;54H[?25h",,terminal_output
|
| 31 |
+
30,61803,"TERMINAL",0,0,"[?25l[25;52Hr[25;53Hu[25;56H[?25h[?25l[25;54Hn[25;55H [25;56H[?25h",,terminal_output
|
| 32 |
+
31,66115,"TERMINAL",0,0,"[?25l[25;56H-[25;57H[?25h",,terminal_output
|
| 33 |
+
32,66370,"TERMINAL",0,0,"[?25l[25;57H-[25;59H[?25h[?25l[25;58Hn[25;59H[?25h",,terminal_output
|
| 34 |
+
33,66419,"TERMINAL",0,0,"[?25l[25;59Ht[25;61H[?25h",,terminal_output
|
| 35 |
+
34,66544,"TERMINAL",0,0,"[?25l[25;60Ha[25;62H[?25h[?25l[25;61Hs[25;62H[?25h",,terminal_output
|
| 36 |
+
35,66670,"TERMINAL",0,0,"[?25l[25;62Hk[25;63H[?25h",,terminal_output
|
| 37 |
+
36,66915,"TERMINAL",0,0,"[?25l[25;63Hs[25;64H[?25h",,terminal_output
|
| 38 |
+
37,68509,"TERMINAL",0,0,"[?25l[25;64H=[25;65H[?25h",,terminal_output
|
| 39 |
+
38,68723,"TERMINAL",0,0,"[?25l[25;65H9[25;67H[?25h[?25l[25;66H [25;67H[?25h",,terminal_output
|
| 40 |
+
39,69523,"TERMINAL",0,0,"[?25l[25;65H8[25;66H[?25h",,terminal_output
|
| 41 |
+
40,69623,"TERMINAL",0,0,"[?25l[25;66H [25;67H[?25h",,terminal_output
|
| 42 |
+
41,76724,"experiments/tokenizer_optimal_batch_size.sh",220,0,"",shellscript,selection_command
|
| 43 |
+
42,77004,"experiments/tokenizer_optimal_batch_size.sh",208,0,"",shellscript,selection_command
|
| 44 |
+
43,77140,"experiments/tokenizer_optimal_batch_size.sh",201,0,"",shellscript,selection_command
|
| 45 |
+
44,77260,"experiments/tokenizer_optimal_batch_size.sh",196,0,"",shellscript,selection_command
|
| 46 |
+
45,77780,"experiments/tokenizer_optimal_batch_size.sh",199,0,"",shellscript,selection_command
|
| 47 |
+
46,78055,"experiments/tokenizer_optimal_batch_size.sh",200,0,"",shellscript,selection_command
|
| 48 |
+
47,78141,"experiments/tokenizer_optimal_batch_size.sh",200,0," ",shellscript,content
|
| 49 |
+
48,78141,"experiments/tokenizer_optimal_batch_size.sh",201,0,"",shellscript,selection_keyboard
|
| 50 |
+
49,80576,"experiments/tokenizer_optimal_batch_size.sh",200,1,"",shellscript,content
|
| 51 |
+
50,80683,"experiments/tokenizer_optimal_batch_size.sh",199,0,"",shellscript,selection_command
|
| 52 |
+
51,89691,"experiments/tokenizer_grain_checkpointing.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\ndata_dir=""$PWD/data_arrayrecord/dummy""\nckpt_dir=""$PWD/checkpoints/tokenizer_openai_grain_checkpointing""\n\nexport XLA_FLAGS=--xla_gpu_autotune_level=0\nsrun python train_tokenizer.py \\n --batch_size 48 \\n --restore_ckpt \\n --save_ckpt \\n --log_checkpoint_interval 5 \\n --ckpt_dir $ckpt_dir \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
|
| 53 |
+
52,92956,"experiments/tokenizer_grain_checkpointing.sh",466,0,"",shellscript,selection_command
|
| 54 |
+
53,93385,"experiments/tokenizer_grain_checkpointing.sh",441,0,"",shellscript,selection_command
|
| 55 |
+
54,93386,"experiments/tokenizer_grain_checkpointing.sh",416,0,"",shellscript,selection_command
|
| 56 |
+
55,93386,"experiments/tokenizer_grain_checkpointing.sh",399,0,"",shellscript,selection_command
|
| 57 |
+
56,93389,"experiments/tokenizer_grain_checkpointing.sh",374,0,"",shellscript,selection_command
|
| 58 |
+
57,93389,"experiments/tokenizer_grain_checkpointing.sh",347,0,"",shellscript,selection_command
|
| 59 |
+
58,93399,"experiments/tokenizer_grain_checkpointing.sh",313,0,"",shellscript,selection_command
|
| 60 |
+
59,93399,"experiments/tokenizer_grain_checkpointing.sh",288,0,"",shellscript,selection_command
|
| 61 |
+
60,93443,"experiments/tokenizer_grain_checkpointing.sh",270,0,"",shellscript,selection_command
|
| 62 |
+
61,93563,"experiments/tokenizer_grain_checkpointing.sh",249,0,"",shellscript,selection_command
|
| 63 |
+
62,93852,"experiments/tokenizer_grain_checkpointing.sh",246,0,"",shellscript,selection_command
|
| 64 |
+
63,99523,"experiments/tokenizer_grain_checkpointing.sh",246,2,"",shellscript,content
|
| 65 |
+
64,100143,"experiments/tokenizer_grain_checkpointing.sh",246,0,"9",shellscript,content
|
| 66 |
+
65,100144,"experiments/tokenizer_grain_checkpointing.sh",247,0,"",shellscript,selection_keyboard
|
| 67 |
+
66,100232,"experiments/tokenizer_grain_checkpointing.sh",247,0,"6",shellscript,content
|
| 68 |
+
67,100232,"experiments/tokenizer_grain_checkpointing.sh",248,0,"",shellscript,selection_keyboard
|
| 69 |
+
68,100483,"experiments/tokenizer_grain_checkpointing.sh",247,0,"",shellscript,selection_command
|
| 70 |
+
69,104563,"experiments/tokenizer_grain_checkpointing.sh",214,0,"",shellscript,selection_command
|
| 71 |
+
70,104683,"experiments/tokenizer_grain_checkpointing.sh",208,0,"",shellscript,selection_command
|
| 72 |
+
71,104811,"experiments/tokenizer_grain_checkpointing.sh",201,0,"",shellscript,selection_command
|
| 73 |
+
72,106363,"experiments/tokenizer_grain_checkpointing.sh",201,0,"-",shellscript,content
|
| 74 |
+
73,106364,"experiments/tokenizer_grain_checkpointing.sh",202,0,"",shellscript,selection_keyboard
|
| 75 |
+
74,106364,"experiments/tokenizer_grain_checkpointing.sh",202,0,"-",shellscript,content
|
| 76 |
+
75,106364,"experiments/tokenizer_grain_checkpointing.sh",203,0,"",shellscript,selection_keyboard
|
| 77 |
+
76,106364,"experiments/tokenizer_grain_checkpointing.sh",203,0,"n",shellscript,content
|
| 78 |
+
77,106365,"experiments/tokenizer_grain_checkpointing.sh",204,0,"",shellscript,selection_keyboard
|
| 79 |
+
78,106365,"experiments/tokenizer_grain_checkpointing.sh",204,0,"t",shellscript,content
|
| 80 |
+
79,106365,"experiments/tokenizer_grain_checkpointing.sh",205,0,"",shellscript,selection_keyboard
|
| 81 |
+
80,106443,"experiments/tokenizer_grain_checkpointing.sh",205,0,"a",shellscript,content
|
| 82 |
+
81,106444,"experiments/tokenizer_grain_checkpointing.sh",206,0,"",shellscript,selection_keyboard
|
| 83 |
+
82,106539,"experiments/tokenizer_grain_checkpointing.sh",206,0,"s",shellscript,content
|
| 84 |
+
83,106539,"experiments/tokenizer_grain_checkpointing.sh",207,0,"",shellscript,selection_keyboard
|
| 85 |
+
84,107223,"experiments/tokenizer_grain_checkpointing.sh",207,0,"k",shellscript,content
|
| 86 |
+
85,107224,"experiments/tokenizer_grain_checkpointing.sh",208,0,"",shellscript,selection_keyboard
|
| 87 |
+
86,107311,"experiments/tokenizer_grain_checkpointing.sh",208,0,"s",shellscript,content
|
| 88 |
+
87,107312,"experiments/tokenizer_grain_checkpointing.sh",209,0,"",shellscript,selection_keyboard
|
| 89 |
+
88,108179,"experiments/tokenizer_grain_checkpointing.sh",209,0,"=",shellscript,content
|
| 90 |
+
89,108179,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_keyboard
|
| 91 |
+
90,108400,"experiments/tokenizer_grain_checkpointing.sh",210,0,"8",shellscript,content
|
| 92 |
+
91,108400,"experiments/tokenizer_grain_checkpointing.sh",211,0,"",shellscript,selection_keyboard
|
| 93 |
+
92,108771,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_command
|
| 94 |
+
93,108923,"experiments/tokenizer_grain_checkpointing.sh",211,0,"",shellscript,selection_command
|
| 95 |
+
94,108987,"experiments/tokenizer_grain_checkpointing.sh",211,0," ",shellscript,content
|
| 96 |
+
95,108987,"experiments/tokenizer_grain_checkpointing.sh",212,0,"",shellscript,selection_keyboard
|
| 97 |
+
96,109163,"experiments/tokenizer_grain_checkpointing.sh",211,0,"",shellscript,selection_command
|
| 98 |
+
97,110511,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_command
|
| 99 |
+
98,110683,"experiments/tokenizer_grain_checkpointing.sh",209,0,"",shellscript,selection_command
|
| 100 |
+
99,110803,"experiments/tokenizer_grain_checkpointing.sh",203,0,"",shellscript,selection_command
|
| 101 |
+
100,110963,"experiments/tokenizer_grain_checkpointing.sh",201,0,"",shellscript,selection_command
|
| 102 |
+
101,111571,"experiments/tokenizer_grain_checkpointing.sh",203,0,"",shellscript,selection_command
|
| 103 |
+
102,111572,"experiments/tokenizer_grain_checkpointing.sh",209,0,"",shellscript,selection_command
|
| 104 |
+
103,111572,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_command
|
| 105 |
+
104,115151,"TERMINAL",0,0,"[K",,terminal_output
|
| 106 |
+
105,115695,"TERMINAL",0,0,"[?25l[25;51Hb[25;53H[?25h",,terminal_output
|
| 107 |
+
106,115763,"TERMINAL",0,0,"[?25l[25;52Ha[25;54H[?25h[?25l[25;53Hs[25;54H[?25h",,terminal_output
|
| 108 |
+
107,115891,"TERMINAL",0,0,"[?25l[25;54Hh[25;55H[?25h",,terminal_output
|
| 109 |
+
108,115995,"TERMINAL",0,0,"[?25l[25;55H [25;56H[?25h",,terminal_output
|
| 110 |
+
109,116611,"TERMINAL",0,0,"[?25l[25;56Hx[25;57H[?25h",,terminal_output
|
| 111 |
+
110,116883,"TERMINAL",0,0,"[?25l[25;56Hx[25;57H[?25h",,terminal_output
|
| 112 |
+
111,117263,"TERMINAL",0,0,"[?25l[25;56He[25;57H[?25h",,terminal_output
|
| 113 |
+
112,117443,"TERMINAL",0,0,"[?25l[25;57Hx[25;58H[?25h",,terminal_output
|
| 114 |
+
113,117611,"TERMINAL",0,0,"[?25l[25;58Hp[25;59H[?25h",,terminal_output
|
| 115 |
+
114,117763,"TERMINAL",0,0,"[?25l[25;59He[25;61H[?25h[?25l[25;60Hr[25;61H[?25h",,terminal_output
|
| 116 |
+
115,117923,"TERMINAL",0,0,"[?25l[25;61Hi[25;62H[?25h",,terminal_output
|
| 117 |
+
116,118047,"TERMINAL",0,0,"ments/",,terminal_output
|
| 118 |
+
117,118523,"TERMINAL",0,0,"[?25l[25;68Ht[25;69H[?25h",,terminal_output
|
| 119 |
+
118,118643,"TERMINAL",0,0,"[?25l[25;69Ho[25;70Hk[25;71H[?25h",,terminal_output
|
| 120 |
+
119,118723,"TERMINAL",0,0,"enizer_",,terminal_output
|
| 121 |
+
120,119899,"TERMINAL",0,0,"[?25l[25;78Hg[25;79H[?25h",,terminal_output
|
| 122 |
+
121,120099,"TERMINAL",0,0,"[?25l[25;79Hr[25;81H[?25h[?25l[25;80Ha[25;82H[?25h[?25l[25;81Hi[25;83H[?25h",,terminal_output
|
| 123 |
+
122,120191,"TERMINAL",0,0,"[?25l[25;82Hn[25;83H[?25h",,terminal_output
|
| 124 |
+
123,120291,"TERMINAL",0,0,"_checkpointing.sh ",,terminal_output
|
| 125 |
+
124,120807,"TERMINAL",0,0,"[?25l[?2004l\r[?25hsrun: error: Unable to create step for job 13340: More processors requested than permitted\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 126 |
+
125,132623,"TERMINAL",0,0,"e",,terminal_output
|
| 127 |
+
126,132691,"TERMINAL",0,0,"x",,terminal_output
|
| 128 |
+
127,132823,"TERMINAL",0,0,"it",,terminal_output
|
| 129 |
+
128,134760,"TERMINAL",0,0,"\r\n[?2004l\rexit\r\nsrun: error: hai002: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 13340\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;1]633;P;Cwd=/home/franz.srambical/jafar",,terminal_output
|
| 130 |
+
129,141531,"TERMINAL",0,0,"salloc --gpus=8 --cpus=8 --mem=100G",,terminal_command
|
| 131 |
+
130,141532,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;salloc --gpus=8 --cpus=8 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;Csalloc: option '--cpus=8' is ambiguous; possibilities: '--cpus-per-gpu' '--cpus-per-task'\r\nTry ""salloc --help"" for more information\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;255[?25h",,terminal_output
|
| 132 |
+
131,150389,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-gpu=1 --mem=100G",,terminal_command
|
| 133 |
+
132,150447,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-gpu=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Pending job allocation 13341\r\nsalloc: job 13341 queued and waiting for resources\r\n",,terminal_output
|
| 134 |
+
133,163548,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=8 --mem=100G",,terminal_command
|
| 135 |
+
134,163651,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=8 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Pending job allocation 13342\r\nsalloc: job 13342 queued and waiting for resources\r\n",,terminal_output
|
| 136 |
+
135,184827,"TERMINAL",0,0,"exit",,terminal_command
|
| 137 |
+
136,184878,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;exit;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;Cexit\r\n[?25h",,terminal_output
|
| 138 |
+
137,186518,"TERMINAL",0,0,"",,terminal_focus
|
| 139 |
+
138,198439,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=4 --mem=100G",,terminal_command
|
| 140 |
+
139,198518,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=4 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13343\r\nsalloc: job 13343 queued and waiting for resources\r\n",,terminal_output
|
| 141 |
+
140,210058,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=1 --mem=100G",,terminal_command
|
| 142 |
+
141,210116,"TERMINAL",0,0,"\r\n\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=1 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;Csalloc: Pending job allocation 13344\r\nsalloc: job 13344 queued and waiting for resources\r\n",,terminal_output
|
| 143 |
+
142,217717,"TERMINAL",0,0,"^C",,terminal_command
|
| 144 |
+
143,217732,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C",,terminal_output
|
| 145 |
+
144,219775,"TERMINAL",0,0,"squeue",,terminal_command
|
| 146 |
+
145,219776,"TERMINAL",0,0,"[?25l[1;29H\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;Bsqueue\r\n[?2004l\r]633;E;squeue;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n[?25h",,terminal_output
|
| 147 |
+
146,231205,"TERMINAL",0,0,"salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G",,terminal_command
|
| 148 |
+
147,231250,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13345\r\nsalloc: job 13345 queued and waiting for resources\r\n",,terminal_output
|
| 149 |
+
148,243682,"TERMINAL",0,0,"salloc: job 13345 has been allocated resources\r\nsalloc: Granted job allocation 13345\r\n",,terminal_output
|
| 150 |
+
149,243813,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\n",,terminal_output
|
| 151 |
+
150,244111,"TERMINAL",0,0,"Running inside SLURM, Job ID 13345.\r\n",,terminal_output
|
| 152 |
+
151,244202,"TERMINAL",0,0,"]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 153 |
+
152,374723,"TERMINAL",0,0,"[?25ls[2mr[22m[18;53H[?25h[?25l[18;52Hr[18;53H[?25h",,terminal_output
|
| 154 |
+
153,374876,"TERMINAL",0,0,"[?25l[18;53Hu[18;55H[?25h[?25l[18;54Hn[18;56H[?25h",,terminal_output
|
| 155 |
+
154,374975,"TERMINAL",0,0,"[?25l[18;55H [18;56H[?25h",,terminal_output
|
| 156 |
+
155,377109,"TERMINAL",0,0,"[?25l[18;56H-[18;57H[?25h",,terminal_output
|
| 157 |
+
156,377347,"TERMINAL",0,0,"[?25l[18;57H-[18;58H[?25h",,terminal_output
|
| 158 |
+
157,377526,"TERMINAL",0,0,"[?25l[18;58Hn[18;60H[?25h",,terminal_output
|
| 159 |
+
158,377674,"TERMINAL",0,0,"[?25l[18;59Ht[18;61H[?25h",,terminal_output
|
| 160 |
+
159,377836,"TERMINAL",0,0,"[?25l[18;60Ha[18;63H[?25h[?25l[18;61Hs[18;63H[?25h",,terminal_output
|
| 161 |
+
160,378034,"TERMINAL",0,0,"[?25l[18;62Hk[18;64H[?25h[?25l[18;63Hs[18;64H[?25h",,terminal_output
|
| 162 |
+
161,378294,"TERMINAL",0,0,"[?25l[18;64H=[18;66H[?25h",,terminal_output
|
| 163 |
+
162,378467,"TERMINAL",0,0,"[?25l[18;65H8[18;66H[?25h",,terminal_output
|
| 164 |
+
163,378714,"TERMINAL",0,0,"[?25l[?2004l\r[?25hsrun: fatal: No command given to execute.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 165 |
+
164,381944,"TERMINAL",0,0,"[?25lb[2ma[22m[20;53H[?25h",,terminal_output
|
| 166 |
+
165,381996,"TERMINAL",0,0,"[?25l[20;52Ha[20;54H[?25h",,terminal_output
|
| 167 |
+
166,382073,"TERMINAL",0,0,"[?25l[20;53Hs[20;55H[?25h",,terminal_output
|
| 168 |
+
167,382134,"TERMINAL",0,0,"[?25l[20;54Hh[20;55H[?25h",,terminal_output
|
| 169 |
+
168,382242,"TERMINAL",0,0,"[?25l[20;55H [20;57H[?25h",,terminal_output
|
| 170 |
+
169,382287,"TERMINAL",0,0,"[?25l[20;56He[20;57H[?25h",,terminal_output
|
| 171 |
+
170,382501,"TERMINAL",0,0,"[?25l[20;57Hx[20;59H[?25h",,terminal_output
|
| 172 |
+
171,382564,"TERMINAL",0,0,"[?25l[20;58Hp[20;60H[?25h",,terminal_output
|
| 173 |
+
172,382650,"TERMINAL",0,0,"[?25l[20;59He[20;61H[?25h",,terminal_output
|
| 174 |
+
173,382713,"TERMINAL",0,0,"[?25l[20;60Hr[20;63H[?25h",,terminal_output
|
| 175 |
+
174,382783,"TERMINAL",0,0,"[?25l[20;61Hi[20;63H[?25h[?25l[20;62Hm[20;63H[?25h",,terminal_output
|
| 176 |
+
175,382949,"TERMINAL",0,0,"ents/",,terminal_output
|
| 177 |
+
176,383082,"TERMINAL",0,0,"[?25l[20;68Ht[20;69H[?25h",,terminal_output
|
| 178 |
+
177,383249,"TERMINAL",0,0,"[?25l[20;69Ho[20;71H[?25h[?25l[20;70Hk[20;71H[?25h",,terminal_output
|
| 179 |
+
178,383400,"TERMINAL",0,0,"enizer_",,terminal_output
|
| 180 |
+
179,384268,"TERMINAL",0,0,"[?25l[20;78Hg[20;79H[?25h",,terminal_output
|
| 181 |
+
180,384457,"TERMINAL",0,0,"[?25l[20;79Hr[20;80H[?25h",,terminal_output
|
| 182 |
+
181,384620,"TERMINAL",0,0,"ain_checkpointing.sh ",,terminal_output
|
| 183 |
+
182,384835,"TERMINAL",0,0,"[?25l[?2004l\r[?25hsrun: warning: can't honor --ntasks-per-node set to 4 which doesn't match the requested tasks 8 with the maximum number of requested nodes 1. Ignoring --ntasks-per-node.\r\nsrun: error: Unable to create step for job 13345: More processors requested than permitted\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 184 |
+
183,402218,"TERMINAL",0,0,"e",,terminal_output
|
| 185 |
+
184,402417,"TERMINAL",0,0,"[?25l[25;52Hx[25;54H[?25h",,terminal_output
|
| 186 |
+
185,402489,"TERMINAL",0,0,"[?25l[25;53Hi[25;55H[?25h",,terminal_output
|
| 187 |
+
186,402572,"TERMINAL",0,0,"[?25l[25;54Ht[25;55H[?25h",,terminal_output
|
| 188 |
+
187,402688,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\n[?25hsrun: error: hai002: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 13345\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;1",,terminal_output
|
| 189 |
+
188,410925,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=4 --mem=100G",,terminal_command
|
| 190 |
+
189,411026,"TERMINAL",0,0,"\r\n\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=4 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;Csalloc: Granted job allocation 13346\r\nsalloc: Nodes hai002 are ready for job\r\n",,terminal_output
|
| 191 |
+
190,411346,"TERMINAL",0,0,"Running inside SLURM, Job ID 13346.\r\n",,terminal_output
|
| 192 |
+
191,411439,"TERMINAL",0,0,"]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 193 |
+
192,412564,"TERMINAL",0,0,"e",,terminal_output
|
| 194 |
+
193,412716,"TERMINAL",0,0,"[?25l[34;52Hx[34;54H[?25h",,terminal_output
|
| 195 |
+
194,412797,"TERMINAL",0,0,"[?25l[34;53Hi[34;54H[?25h",,terminal_output
|
| 196 |
+
195,412896,"TERMINAL",0,0,"[?25l[34;54Ht[34;55H[?25h",,terminal_output
|
| 197 |
+
196,412956,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\nsalloc: Relinquishing job allocation 13346\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;0[?25h",,terminal_output
|
| 198 |
+
197,415898,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=5 --mem=100G",,terminal_command
|
| 199 |
+
198,415959,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=5 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13347\r\nsalloc: job 13347 queued and waiting for resources\r\n",,terminal_output
|
| 200 |
+
199,419258,"TERMINAL",0,0,"",,terminal_focus
|
| 201 |
+
200,422599,"TERMINAL",0,0,"squeue",,terminal_command
|
| 202 |
+
201,422607,"TERMINAL",0,0,"\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;Bsqueue\r\n[?2004l\r]633;E;squeue;6def2f5c-c4a9-4ad4-8a90-ea797733dc74]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n",,terminal_output
|
| 203 |
+
202,426751,"TERMINAL",0,0,"salloc",,terminal_focus
|
| 204 |
+
203,479574,"TERMINAL",0,0,"^Csalloc: Job allocation 13347 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;franz.srambical@hai-login2:~/jafar",,terminal_output
|
| 205 |
+
204,481639,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=5",,terminal_command
|
| 206 |
+
205,481686,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=5;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13348\r\nsalloc: job 13348 queued and waiting for resources\r\n",,terminal_output
|
| 207 |
+
206,483620,"TERMINAL",0,0,"bash",,terminal_focus
|
| 208 |
+
207,487752,"TERMINAL",0,0,"salloc",,terminal_focus
|
| 209 |
+
208,489649,"TERMINAL",0,0,"bash",,terminal_focus
|
| 210 |
+
209,491295,"TERMINAL",0,0,"squeue",,terminal_command
|
| 211 |
+
210,491312,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue;6def2f5c-c4a9-4ad4-8a90-ea797733dc74]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n 13348 franz.sram interacti 1 5 PD 2025-07-11T12:26:30 N/A 0:00 1-00:00:00 (QOSMaxCpuPerUserLimit)\r\n",,terminal_output
|
| 212 |
+
211,494054,"TERMINAL",0,0,"salloc",,terminal_focus
|
| 213 |
+
212,539208,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=4 --mem=100G",,terminal_command
|
| 214 |
+
213,539279,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=4 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;Csalloc: Granted job allocation 13349\r\nsalloc: Nodes hai002 are ready for job\r\n",,terminal_output
|
| 215 |
+
214,539594,"TERMINAL",0,0,"Running inside SLURM, Job ID 13349.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 216 |
+
215,541451,"TERMINAL",0,0,"[?25lb[2ma[22m[58;53H[?25h",,terminal_output
|
| 217 |
+
216,541656,"TERMINAL",0,0,"[?25l[58;52Ha[58;55H[?25h[?25l[58;53Hs[58;55H[?25h",,terminal_output
|
| 218 |
+
217,541684,"TERMINAL",0,0,"[?25l[58;54Hh[58;56H[?25h",,terminal_output
|
| 219 |
+
218,541864,"TERMINAL",0,0,"[?25l[58;55H [58;57H[?25h[?25l[58;56He[58;57H[?25h",,terminal_output
|
| 220 |
+
219,542070,"TERMINAL",0,0,"[?25l[58;57Hx[58;58Hp[58;59H[?25h",,terminal_output
|
| 221 |
+
220,542292,"TERMINAL",0,0,"[?25l[58;59He[58;63H[?25h[?25l[58;60Hr[58;63H[?25h",,terminal_output
|
| 222 |
+
221,542392,"TERMINAL",0,0,"[?25l[58;61Hi[58;62Hm[58;63H[?25h",,terminal_output
|
| 223 |
+
222,542544,"TERMINAL",0,0,"ents/",,terminal_output
|
| 224 |
+
223,542712,"TERMINAL",0,0,"[?25l[58;69H[X[58;68H[X[0mt[?25hok",,terminal_output
|
| 225 |
+
224,542904,"TERMINAL",0,0,"[?25l[58;71H[X[0menizer_[?25h",,terminal_output
|
| 226 |
+
225,543085,"TERMINAL",0,0,"g",,terminal_output
|
| 227 |
+
226,543284,"TERMINAL",0,0,"rain_checkpointing.sh ",,terminal_output
|
| 228 |
+
227,543784,"TERMINAL",0,0,"\r\n[?2004l\rsrun: error: Unable to create step for job 13349: More processors requested than permitted\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[franz.srambical@hai002.haicore.berlin:~/jafar] $ ",,terminal_output
|
| 229 |
+
228,850377,"TERMINAL",0,0,"[?25le[2mx[22m[2mi[22m[58;54H[?25h[?25l[58;52Hx[58;54H[?25h",,terminal_output
|
| 230 |
+
229,850735,"TERMINAL",0,0,"[?25l[58;53Hi[58;55H[?25h[?25l[58;54Ht[58;55H[?25h\r\n[?2004l\rexit\r\n",,terminal_output
|
| 231 |
+
230,851043,"TERMINAL",0,0,"srun: error: hai002: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 13349\r\nsalloc: Job allocation 13349 has been revoked.\r\n",,terminal_output
|
| 232 |
+
231,853283,"TERMINAL",0,0,"squeue",,terminal_command
|
| 233 |
+
232,853285,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C",,terminal_output
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-42033ef5-0153-4063-bb24-8be488cec0e41763382064980-2025_11_17-13.21.21.725/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4475b0d1-da06-4a22-b499-8b87ae83c0be1764503869494-2025_11_30-12.58.11.549/source.csv
ADDED
|
@@ -0,0 +1,2 @@
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,1188,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:58:11 PM [info] Activating crowd-code\n12:58:11 PM [info] Recording started\n12:58:11 PM [info] Initializing git provider using file system watchers...\n12:58:11 PM [info] No workspace folder found\n",Log,tab
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-47e5b338-8100-4eb3-a0af-f85f93ebef961765779167850-2025_12_15-07.12.55.913/source.csv
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,251,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:12:55 AM [info] Activating crowd-code\n7:12:55 AM [info] Recording started\n7:12:55 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 3 |
+
3,304,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"7:12:56 AM [info] Git repository found\n7:12:56 AM [info] Git provider initialized successfully\n7:12:56 AM [info] Initial git state: [object Object]\n",Log,content
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-48d4bcc9-cb57-44d4-8bb2-7caa4daeab621759566467891-2025_10_04-10.27.59.485/source.csv
ADDED
|
@@ -0,0 +1,221 @@
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/speed_ablations/batch_size_36/coinrun_dynamics_grain_ablation.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=dynamics_coinrun_mila_submission_batch_size_36_grain_ablation\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\ncurrent_branch=$(git rev-parse --abbrev-ref HEAD)\nif [ ""$current_branch"" != ""ablation/use-pytorch-dataloader"" ]; then\n echo ""This script must be run from the ablation/use-pytorch-dataloader branch. Current branch is $current_branch. Exiting.""\n exit 1\nfi\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun dynamics 50m_dataset mila_submission speed_test batch_size_36 ablation grain_ablation unicorn""\n\nnpy_files_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/jafar_coinrun_50M""\ntokenizer_ckpt_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/tokenizer/tokenizer_coinrun_mila_submission_29949""\nCHECKPOINT_DIR=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/${job_name}/${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\n\nsrun python jasmine/train_dynamics.py \\n --patch_size=16 \\n --batch_size=36 \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags ${tags} \\n --entity instant-uv \\n --project jafar \\n --num_steps=100_000 \\n --log_interval=10_000 \\n --log_image_interval=200_000 \\n --tokenizer_checkpoint=""${tokenizer_ckpt_dir}"" \\n --data_dir=""${npy_files_dir}"" &\n\nchild_pid=$!\n\nwait $child_pid\n\n",shellscript,tab
|
| 3 |
+
2,1554,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:27:59 AM [info] Activating crowd-code\n10:27:59 AM [info] Recording started\n10:27:59 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,2412,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"10:28:00 AM [info] Git repository found\n10:28:00 AM [info] Git provider initialized successfully\n10:28:00 AM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,3516,"TERMINAL",0,0,"",,terminal_command
|
| 6 |
+
5,7673,"TERMINAL",0,0,"",,terminal_focus
|
| 7 |
+
6,7674,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/speed_ablations/batch_size_36/coinrun_dynamics_grain_ablation.sh",0,0,"",shellscript,tab
|
| 8 |
+
7,8552,"TERMINAL",0,0,"source /home/franz.srambical/jafar/.venv/bin/activate",,terminal_command
|
| 9 |
+
8,8564,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login2:~/jafar",,terminal_output
|
| 10 |
+
9,10864,"TERMINAL",0,0,"",,terminal_command
|
| 11 |
+
10,27224,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/speed_ablations/batch_size_36/coinrun_dynamics_grain_ablation.sh",0,0,"",shellscript,selection_keyboard
|
| 12 |
+
11,35420,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/speed_ablations/batch_size_36/coinrun_dynamics_grain_ablation.sh",2049,0,"",shellscript,selection_keyboard
|
| 13 |
+
12,115262,"TERMINAL",0,0,"",,terminal_command
|
| 14 |
+
13,129287,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_dynamics_from_fully_trained_tokenizer.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_500m_seed_w_increment/val""\nCHECKPOINT_PATH=""""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/dynamics_coinrun_500m_dataset/29519""""\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint $CHECKPOINT_PATH \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=4 \\n --patch_size=4 \\n --start_frame=4 \\n --image_height=64 \\n --image_width=64 \\n --dyna_type=maskgit\n",shellscript,tab
|
| 15 |
+
14,144713,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample",0,0,"",plaintext,tab
|
| 16 |
+
15,168577,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 17 |
+
16,174385,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_dynamics_from_fully_trained_tokenizer.sh",0,0,"",shellscript,tab
|
| 18 |
+
17,175902,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_dynamics_from_fully_trained_tokenizer.sh",0,1085,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_500m_seed_w_increment/val""\nCHECKPOINT_PATH=""""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/dynamics_coinrun_500m_dataset/29519""""\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint $CHECKPOINT_PATH \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=4 \\n --patch_size=4 \\n --start_frame=4 \\n --image_height=64 \\n --image_width=64 \\n --dyna_type=maskgit\n",shellscript,selection_command
|
| 19 |
+
18,176023,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_dynamics_from_fully_trained_tokenizer.sh",1085,0,"",shellscript,selection_command
|
| 20 |
+
19,176898,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 21 |
+
20,177720,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=01:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/coinrun/dynamics/sampling/maskgit/%x_%j.log\n#SBATCH --job-name=coinrun_sample_maskgit\n\n# Activate virtual environment\nsource .venv/bin/activate\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_500m_seed_w_increment/val""\nCHECKPOINT_PATH=""""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/dynamics_coinrun_500m_dataset/29519""""\n\n\necho ""Sampling from checkpoint: $CHECKPOINT_PATH""\n\nsrun python jasmine/sample.py \\n --checkpoint $CHECKPOINT_PATH \\n --data_dir=$array_records_dir \\n --seq_len=16 \\n --batch_size=4 \\n --patch_size=4 \\n --start_frame=4 \\n --image_height=64 \\n --image_width=64 \\n --dyna_type=maskgit\n",shellscript,content
|
| 22 |
+
21,178088,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,selection_command
|
| 23 |
+
22,179087,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",1085,0,"",shellscript,selection_keyboard
|
| 24 |
+
23,179391,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,selection_keyboard
|
| 25 |
+
24,180057,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",20,0,"",shellscript,selection_command
|
| 26 |
+
25,180327,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",21,0,"",shellscript,selection_command
|
| 27 |
+
26,180328,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",39,0,"",shellscript,selection_command
|
| 28 |
+
27,180362,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",67,0,"",shellscript,selection_command
|
| 29 |
+
28,180401,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",91,0,"",shellscript,selection_command
|
| 30 |
+
29,180425,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",123,0,"",shellscript,selection_command
|
| 31 |
+
30,180481,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",149,0,"",shellscript,selection_command
|
| 32 |
+
31,180571,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,0,"",shellscript,selection_command
|
| 33 |
+
32,180721,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 34 |
+
33,180889,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,0,"",shellscript,selection_command
|
| 35 |
+
34,181042,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 36 |
+
35,181225,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",435,0,"",shellscript,selection_command
|
| 37 |
+
36,181605,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 38 |
+
37,190820,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_dynamics_from_fully_trained_tokenizer.sh",0,0,"",shellscript,tab
|
| 39 |
+
38,191645,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 40 |
+
39,192254,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_dynamics_from_fully_trained_tokenizer.sh",0,0,"",shellscript,tab
|
| 41 |
+
40,195371,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 42 |
+
41,196201,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",1085,0,"",shellscript,selection_keyboard
|
| 43 |
+
42,196561,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,selection_keyboard
|
| 44 |
+
43,197121,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",20,0,"",shellscript,selection_command
|
| 45 |
+
44,197329,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",21,0,"",shellscript,selection_command
|
| 46 |
+
45,197361,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",39,0,"",shellscript,selection_command
|
| 47 |
+
46,197377,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",67,0,"",shellscript,selection_command
|
| 48 |
+
47,197409,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",91,0,"",shellscript,selection_command
|
| 49 |
+
48,197449,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",123,0,"",shellscript,selection_command
|
| 50 |
+
49,197481,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",149,0,"",shellscript,selection_command
|
| 51 |
+
50,197521,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,0,"",shellscript,selection_command
|
| 52 |
+
51,197545,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 53 |
+
52,197579,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",435,0,"",shellscript,selection_command
|
| 54 |
+
53,197618,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",477,0,"",shellscript,selection_command
|
| 55 |
+
54,198362,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",435,0,"",shellscript,selection_command
|
| 56 |
+
55,198753,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",476,0,"",shellscript,selection_command
|
| 57 |
+
56,199645,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",476,0,"_mila_submission",shellscript,content
|
| 58 |
+
57,200247,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",491,0,"",shellscript,selection_command
|
| 59 |
+
58,201537,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",493,0,"",shellscript,selection_command
|
| 60 |
+
59,201777,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",523,0,"",shellscript,selection_command
|
| 61 |
+
60,201803,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",549,0,"",shellscript,selection_command
|
| 62 |
+
61,201851,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",551,0,"",shellscript,selection_command
|
| 63 |
+
62,201883,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",608,0,"",shellscript,selection_command
|
| 64 |
+
63,201977,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",721,0,"",shellscript,selection_command
|
| 65 |
+
64,202177,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",790,0,"",shellscript,selection_command
|
| 66 |
+
65,202346,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",721,0,"",shellscript,selection_command
|
| 67 |
+
66,203922,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",785,3,"",shellscript,content
|
| 68 |
+
67,203922,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",784,0,"3303",shellscript,content
|
| 69 |
+
68,203923,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",782,2,"",shellscript,content
|
| 70 |
+
69,203923,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",769,0,"mila_submission_",shellscript,content
|
| 71 |
+
70,203923,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",682,1,"",shellscript,content
|
| 72 |
+
71,206081,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",797,5,"",shellscript,content
|
| 73 |
+
72,206098,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",797,0,"29519""",shellscript,content
|
| 74 |
+
73,206099,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",768,16,"",shellscript,content
|
| 75 |
+
74,206099,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",682,0,"""",shellscript,content
|
| 76 |
+
75,206121,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",720,0,"",shellscript,selection_command
|
| 77 |
+
76,233540,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics/%x_%j.log\n#SBATCH --job-name=dynamics_coinrun_mila_submission\n#SBATCH --requeue\n#SBATCH --signal=b:usr1@300 # 5 min before timeout\n\n# --- signal trap to requeue job before timeout ---\nrequeue_job() {\n echo ""[$(date)] caught sigusr1 (timeout warning), requeueing slurm job $SLURM_JOB_ID...""\n # optional: trigger checkpoint saving here\n # e.g., touch $checkpoint_dir/requeue_trigger\n scontrol requeue $SLURM_JOB_ID\n exit 0\n}\n\ntrap requeue_job sigusr1\n\n# set checkpoint flag based on restart count\nrestart_count=$(scontrol show job $SLURM_JOB_ID | grep -o 'Restarts=[0-9]*' | cut -d'=' -f2)\n\nif [ $restart_count -eq 0 ]; then\n restore_ckpt_flag=""--no-restore-ckpt""\nelse\n restore_ckpt_flag=""--restore-ckpt""\nfi\n\n\n\n# Log the sbatch script\ncat $0\n\nsource .venv/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\ntags=""coinrun dynamics 50m_dataset mila_submission unicorn""\n\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_500m_seed_w_increment""\ntokenizer_ckpt_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/tokenizer/tokenizer_coinrun_mila_submission_29949""\nCHECKPOINT_DIR=""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/dynamics/${job_name}/${slurm_job_id}""\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\ncurrent_branch=$(git rev-parse --abbrev-ref HEAD)\nif [ ""$current_branch"" != ""prepend-action-maskgit"" ]; then\n echo ""This script must be run from the prepend-action-maskgit branch. Current branch is $current_branch. Exiting.""\n exit 1\nfi\n\nsrun python jasmine/train_dynamics.py \\n --patch_size=16 \\n --save_ckpt \\n $restore_ckpt_flag \\n --wandb_id $SLURM_JOB_ID \\n --ckpt_dir $CHECKPOINT_DIR \\n --name=""${job_name}_${slurm_job_id}"" \\n --tags ${tags} \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=""${tokenizer_ckpt_dir}"" \\n --val_data_dir=""${array_records_dir}/val"" \\n --data_dir=""${array_records_dir}/train"" &\n\nchild_pid=$!\n\nwait $child_pid\n\n",shellscript,tab
|
| 78 |
+
77,240808,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",1220,0,"",shellscript,selection_command
|
| 79 |
+
78,241043,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",1329,0,"",shellscript,selection_command
|
| 80 |
+
79,241354,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",1220,0,"",shellscript,selection_command
|
| 81 |
+
80,241620,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",1219,0,"",shellscript,selection_command
|
| 82 |
+
81,241930,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",1220,0,"",shellscript,selection_command
|
| 83 |
+
82,251765,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 84 |
+
83,253156,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",607,0,"",shellscript,selection_command
|
| 85 |
+
84,253390,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",664,0,"\narray_records_dir=""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_500m_seed_w_increment""",shellscript,content
|
| 86 |
+
85,253391,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",665,0,"",shellscript,selection_command
|
| 87 |
+
86,254186,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",552,0,"",shellscript,selection_command
|
| 88 |
+
87,255382,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",665,0,"",shellscript,selection_command
|
| 89 |
+
88,255796,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",773,0,"",shellscript,selection_command
|
| 90 |
+
89,256113,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",772,0,"",shellscript,selection_command
|
| 91 |
+
90,256650,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",772,0,"/",shellscript,content
|
| 92 |
+
91,256651,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",773,0,"",shellscript,selection_keyboard
|
| 93 |
+
92,256802,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",773,0,"v",shellscript,content
|
| 94 |
+
93,256802,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",774,0,"",shellscript,selection_keyboard
|
| 95 |
+
94,256971,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",774,0,"a",shellscript,content
|
| 96 |
+
95,256972,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",775,0,"",shellscript,selection_keyboard
|
| 97 |
+
96,257054,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",775,0,"l",shellscript,content
|
| 98 |
+
97,257054,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",776,0,"",shellscript,selection_keyboard
|
| 99 |
+
98,257281,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",775,0,"",shellscript,selection_command
|
| 100 |
+
99,257663,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",662,0,"",shellscript,selection_command
|
| 101 |
+
100,259606,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",552,113,"",shellscript,content
|
| 102 |
+
101,260486,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",665,0,"",shellscript,selection_command
|
| 103 |
+
102,264941,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",680,0,"",shellscript,selection_command
|
| 104 |
+
103,265079,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",684,0,"",shellscript,selection_command
|
| 105 |
+
104,265280,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",688,0,"",shellscript,selection_command
|
| 106 |
+
105,266598,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",683,104,"",shellscript,content
|
| 107 |
+
106,267542,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",682,1,"",shellscript,content
|
| 108 |
+
107,268114,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",683,0,"",shellscript,selection_command
|
| 109 |
+
108,268362,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",682,1,"",shellscript,content
|
| 110 |
+
109,268793,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",681,0,"",shellscript,selection_command
|
| 111 |
+
110,270965,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",995,0,"",shellscript,selection_keyboard
|
| 112 |
+
111,271813,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",971,0,"",shellscript,selection_command
|
| 113 |
+
112,272053,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",948,0,"",shellscript,selection_command
|
| 114 |
+
113,272077,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",924,0,"",shellscript,selection_command
|
| 115 |
+
114,272113,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",902,0,"",shellscript,selection_command
|
| 116 |
+
115,272158,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",881,0,"",shellscript,selection_command
|
| 117 |
+
116,272193,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",860,0,"",shellscript,selection_command
|
| 118 |
+
117,272237,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",841,0,"",shellscript,selection_command
|
| 119 |
+
118,272433,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",860,0,"",shellscript,selection_command
|
| 120 |
+
119,272654,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",841,0,"",shellscript,selection_command
|
| 121 |
+
120,273441,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",860,0,"",shellscript,selection_command
|
| 122 |
+
121,273853,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",881,0,"",shellscript,selection_command
|
| 123 |
+
122,275553,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",885,0,"",shellscript,selection_command
|
| 124 |
+
123,275781,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",887,0,"",shellscript,selection_command
|
| 125 |
+
124,275913,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",897,0,"",shellscript,selection_command
|
| 126 |
+
125,276073,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",898,0,"",shellscript,selection_command
|
| 127 |
+
126,276493,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",898,1,"",shellscript,content
|
| 128 |
+
127,277273,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",898,0,"1",shellscript,content
|
| 129 |
+
128,277273,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",899,0,"",shellscript,selection_keyboard
|
| 130 |
+
129,277417,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",899,0,"6",shellscript,content
|
| 131 |
+
130,277417,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",900,0,"",shellscript,selection_keyboard
|
| 132 |
+
131,278142,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",899,0,"",shellscript,selection_command
|
| 133 |
+
132,279234,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",996,0,"",shellscript,selection_keyboard
|
| 134 |
+
133,285393,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,selection_keyboard
|
| 135 |
+
134,285890,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",996,0,"",shellscript,selection_keyboard
|
| 136 |
+
135,292035,"TERMINAL",0,0,"git status",,terminal_command
|
| 137 |
+
136,292118,"TERMINAL",0,0,"]633;COn branch prepend-action-maskgit\r\nYour branch is up to date with 'origin/prepend-action-maskgit'.\r\n\r\nChanges not staged for commit:\r\n (use ""git add <file>..."" to update what will be committed)\r\n (use ""git restore <file>..."" to discard changes in working directory)\r\n\t[31mmodified: .gitignore[m\r\n\r\nUntracked files:\r\n (use ""git add <file>..."" to include in what will be committed)\r\n\t[31mdata/test_vis/[m\r\n\t[31mdata/uv.lock[m\r\n\t[31mslurm-29284.out[m\r\n\t[31mslurm/[m\r\n\t[31mthird_party/[m\r\n\t[31muv.lock[m\r\n\r\nno changes added to commit (use ""git add"" and/or ""git commit -a"")\r\n]0;franz.srambical@hai-login2:~/jafar",,terminal_output
|
| 138 |
+
137,311414,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,selection_keyboard
|
| 139 |
+
138,313793,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",20,0,"",shellscript,selection_command
|
| 140 |
+
139,314013,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",21,0,"",shellscript,selection_command
|
| 141 |
+
140,314033,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",39,0,"",shellscript,selection_command
|
| 142 |
+
141,314073,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",67,0,"",shellscript,selection_command
|
| 143 |
+
142,314213,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",91,0,"",shellscript,selection_command
|
| 144 |
+
143,314362,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",123,0,"",shellscript,selection_command
|
| 145 |
+
144,314514,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",149,0,"",shellscript,selection_command
|
| 146 |
+
145,314663,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,0,"",shellscript,selection_command
|
| 147 |
+
146,315153,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 148 |
+
147,322044,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",0,0,"",shellscript,tab
|
| 149 |
+
148,322396,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",0,0,"",shellscript,selection_keyboard
|
| 150 |
+
149,323133,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",20,0,"",shellscript,selection_command
|
| 151 |
+
150,323382,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",21,0,"",shellscript,selection_command
|
| 152 |
+
151,323410,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",39,0,"",shellscript,selection_command
|
| 153 |
+
152,323441,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",67,0,"",shellscript,selection_command
|
| 154 |
+
153,323489,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",91,0,"",shellscript,selection_command
|
| 155 |
+
154,323581,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",117,0,"",shellscript,selection_command
|
| 156 |
+
155,323582,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",138,0,"",shellscript,selection_command
|
| 157 |
+
156,323598,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",233,0,"",shellscript,selection_command
|
| 158 |
+
157,323947,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",234,0,"",shellscript,selection_command
|
| 159 |
+
158,325097,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 160 |
+
159,326550,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",434,0,"\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics/%x_%j.log",shellscript,content
|
| 161 |
+
160,326551,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",435,0,"",shellscript,selection_command
|
| 162 |
+
161,327382,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 163 |
+
162,328024,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,132,"",shellscript,content
|
| 164 |
+
163,328834,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",0,0,"",shellscript,tab
|
| 165 |
+
164,329489,"slurm/jobs/franz/berlin/coinrun/mila_submission/50M_dataset/coinrun_dynamics_base.sh",139,0,"",shellscript,selection_command
|
| 166 |
+
165,330765,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",0,0,"",shellscript,tab
|
| 167 |
+
166,331242,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,0,"",shellscript,selection_command
|
| 168 |
+
167,331301,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",302,0,"\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/coinrun/dynamics/%x_%j.log",shellscript,content
|
| 169 |
+
168,331301,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",303,0,"",shellscript,selection_command
|
| 170 |
+
169,331642,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,0,"",shellscript,selection_command
|
| 171 |
+
170,331953,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",170,133,"",shellscript,content
|
| 172 |
+
171,335093,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",176,0,"",shellscript,selection_command
|
| 173 |
+
172,335334,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",179,0,"",shellscript,selection_command
|
| 174 |
+
173,335381,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",185,0,"",shellscript,selection_command
|
| 175 |
+
174,335393,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",187,0,"",shellscript,selection_command
|
| 176 |
+
175,335433,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",191,0,"",shellscript,selection_command
|
| 177 |
+
176,335473,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",192,0,"",shellscript,selection_command
|
| 178 |
+
177,335493,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",199,0,"",shellscript,selection_command
|
| 179 |
+
178,335724,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",200,0,"",shellscript,selection_command
|
| 180 |
+
179,335893,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",216,0,"",shellscript,selection_command
|
| 181 |
+
180,336153,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",217,0,"",shellscript,selection_command
|
| 182 |
+
181,336173,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",225,0,"",shellscript,selection_command
|
| 183 |
+
182,336196,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",226,0,"",shellscript,selection_command
|
| 184 |
+
183,336353,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",230,0,"",shellscript,selection_command
|
| 185 |
+
184,336553,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",231,0,"",shellscript,selection_command
|
| 186 |
+
185,336721,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",236,0,"",shellscript,selection_command
|
| 187 |
+
186,336854,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",237,0,"",shellscript,selection_command
|
| 188 |
+
187,337014,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",244,0,"",shellscript,selection_command
|
| 189 |
+
188,337294,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",245,0,"",shellscript,selection_command
|
| 190 |
+
189,338586,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",253,0,"",shellscript,selection_command
|
| 191 |
+
190,339545,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",254,0,"",shellscript,selection_command
|
| 192 |
+
191,340106,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",254,0,"-",shellscript,content
|
| 193 |
+
192,340106,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",255,0,"",shellscript,selection_keyboard
|
| 194 |
+
193,340705,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",254,1,"",shellscript,content
|
| 195 |
+
194,340956,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",254,0,"_",shellscript,content
|
| 196 |
+
195,340956,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",255,0,"",shellscript,selection_keyboard
|
| 197 |
+
196,341265,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",255,0,"s",shellscript,content
|
| 198 |
+
197,341266,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",256,0,"",shellscript,selection_keyboard
|
| 199 |
+
198,341277,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",256,0,"a",shellscript,content
|
| 200 |
+
199,341277,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",257,0,"",shellscript,selection_keyboard
|
| 201 |
+
200,341283,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",257,0,"m",shellscript,content
|
| 202 |
+
201,341283,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",258,0,"",shellscript,selection_keyboard
|
| 203 |
+
202,341585,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",258,0,"l",shellscript,content
|
| 204 |
+
203,341586,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",259,0,"",shellscript,selection_keyboard
|
| 205 |
+
204,342066,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",258,1,"",shellscript,content
|
| 206 |
+
205,342097,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",258,0,"p",shellscript,content
|
| 207 |
+
206,342097,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",259,0,"",shellscript,selection_keyboard
|
| 208 |
+
207,342306,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",259,0,"l",shellscript,content
|
| 209 |
+
208,342306,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",260,0,"",shellscript,selection_keyboard
|
| 210 |
+
209,342425,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",260,0,"e",shellscript,content
|
| 211 |
+
210,342425,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",261,0,"",shellscript,selection_keyboard
|
| 212 |
+
211,342946,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",260,0,"",shellscript,selection_command
|
| 213 |
+
212,343346,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",362,0,"",shellscript,selection_command
|
| 214 |
+
213,343746,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",361,0,"",shellscript,selection_command
|
| 215 |
+
214,343756,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",360,0,"",shellscript,selection_command
|
| 216 |
+
215,343756,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",359,0,"",shellscript,selection_command
|
| 217 |
+
216,343826,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",358,0,"",shellscript,selection_command
|
| 218 |
+
217,343994,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",357,0,"",shellscript,selection_command
|
| 219 |
+
218,344153,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",356,0,"",shellscript,selection_command
|
| 220 |
+
219,345444,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",355,0,"_sample",shellscript,content
|
| 221 |
+
220,345462,"slurm/dev/franz/berlin/coinrun/sample/maskgit/sample_mila_submission.sh",362,0,"",shellscript,selection_command
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4a0c523e-3509-46d2-9ab8-f144f364f7ff1755356823323-2025_08_16-17.07.06.759/source.csv
ADDED
|
@@ -0,0 +1,10 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,7,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n # gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n # inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n # gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n # recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n # psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n # ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n # _, index_counts_lam = jnp.unique_counts(\n # jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n # )\n _, 
index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n # codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n # psnr=psnr,\n # ssim=ssim,\n # codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (None, metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n 
args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, 
replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n # dataloader = (\n # jax.make_array_from_process_local_data(videos_sharding, elem)\n # for elem in grain_iterator\n # )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for _ in range(100000):\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n pass\n # gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n # recon_seq = recon[0].clip(0, 1)\n # comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n # comparison_seq = einops.rearrange(\n # comparison_seq * 255, ""t h w c -> h (t w) c""\n # )\n # if jax.process_index() == 0:\n # log_images = dict(\n # image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n # recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n # true_vs_recon=wandb.Image(\n # np.asarray(comparison_seq.astype(np.uint8))\n # ),\n # )\n # wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
|
| 3 |
+
2,55,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:07:06 PM [info] Activating crowd-code\n5:07:06 PM [info] Recording started\n5:07:06 PM [info] Initializing git provider using file system watchers...\n5:07:06 PM [info] Git repository found\n5:07:06 PM [info] Git provider initialized successfully\n",Log,tab
|
| 4 |
+
3,125,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"5:07:06 PM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,5102,"train_dynamics.py",0,0,"",python,tab
|
| 6 |
+
5,6325,"train_dynamics.py",14301,0,"",python,selection_command
|
| 7 |
+
6,6456,"train_dynamics.py",13406,0,"",python,selection_command
|
| 8 |
+
7,6600,"train_dynamics.py",12313,0,"",python,selection_command
|
| 9 |
+
8,8781,"input_pipeline/download/download_array_records.sh",0,0,"#!/bin/bash\n\n# Download and extract array records from Hugging Face\n# \n# This script performs a two-step process:\n# 1. Downloads compressed array records from a Hugging Face dataset repository\n# 2. Extracts the compressed tar files in parallel for better performance\n#\n# Usage:\n# ./download_array_records.sh [hf_download_dir] [final_dataset_dir]\n#\n# Arguments:\n# hf_download_dir - Directory to store compressed downloads (default: data/minecraft_arrayrecords_compressed)\n# final_dataset_dir - Directory for extracted array records (default: data/minecraft_arrayrecords)\n\n# Set default directories if not provided as arguments\nhf_download_dir=""${1:-data/minecraft_arrayrecords_compressed}"" \nfinal_dataset_dir=""${2:-data/minecraft_arrayrecords}"" \n\nmkdir -p $hf_download_dir\nmkdir -p $final_dataset_dir\n\n# Step 1: Download compressed dataset from Hugging Face\necho ""Starting download from Hugging Face...""\nrepo_id=p-doom/open_ai_minecraft_arrayrecords_chunked\nstart_time_hf_download=$(date +%s)\n\nHF_HUB_ENABLE_HF_TRANSFER=1 HF_HUB_DISABLE_SYMLINKS=1 \\nhuggingface-cli download --repo-type dataset $repo_id --local-dir $hf_download_dir\n\nend_time_hf_download=$(date +%s)\necho ""Download completed. Time taken: $((end_time_hf_download - start_time_hf_download)) seconds""\n\n# Step 2: Extract compressed array records in parallel\necho ""Starting parallel extraction of tar files...""\nnum_workers=64 # Number of parallel extraction processes\nstart_time_uncompress=$(date +%s)\n\n# Find all shard tar files and extract them in parallel:\nxargs -0 -P $num_workers -I {} bash -c 'echo ""Extracting {}""; tar -xf ""{}"" -C ""'$final_dataset_dir'""'\n\nend_time_uncompress=$(date +%s)\n\n# Display timing summary\necho ""================================""\necho ""Extraction completed successfully!""\necho ""Uncompress time: $((end_time_uncompress - start_time_uncompress)) seconds""\necho ""Download time: $((end_time_hf_download - start_time_hf_download)) seconds""\necho ""Total time: $((end_time_uncompress - start_time_hf_download)) seconds""\necho ""Final dataset location: $final_dataset_dir""\n",shellscript,tab
|
| 10 |
+
9,9744,"train_dynamics.py",0,0,"",python,tab
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4a17e944-2634-4ccf-bfc0-d106691c39681765284626921-2025_12_09-13.50.38.396/source.csv
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,587,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:50:38 PM [info] Activating crowd-code\n1:50:38 PM [info] Recording started\n1:50:38 PM [info] Initializing git provider using file system watchers...\n1:50:38 PM [info] No workspace folder found\n",Log,tab
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5457b093-d76e-4c99-8ea5-85e61f20bc071761477934870-2025_10_26-12.26.46.766/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-585eaec2-345a-44c8-b05a-f53a6d3046971761933036598-2025_10_31-18.50.42.679/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-59e65827-52ef-4a43-bfe9-199bbb450d831767620386754-2026_01_05-14.39.54.856/source.csv
ADDED
|
@@ -0,0 +1,87 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_16k_glm.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/glm/miles_hf_part_jsonl_16k_tokens/"" \\n--max-tokens-per-conversation 16384 \\n--tokenizer=""zai-org/GLM-4.5-Air""\n",shellscript,tab
|
| 3 |
+
2,230,"tasks",0,0,"",Log,tab
|
| 4 |
+
3,231,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_16k_glm.sh",0,0,"",shellscript,tab
|
| 5 |
+
4,304,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:39:54 PM [info] Activating crowd-code\n2:39:54 PM [info] Recording started\n2:39:54 PM [info] Initializing git provider using file system watchers...\n2:39:54 PM [info] Git repository found\n2:39:54 PM [info] Git provider initialized successfully\n2:39:54 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 6 |
+
5,8734,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_16k_glm.sh",0,0,"",shellscript,tab
|
| 7 |
+
6,2690827,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_128k_glm.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/glm/miles_hf_part_jsonl_128k_tokens/"" \\n--max-tokens-per-conversation 131072 \\n--tokenizer=""zai-org/GLM-4.5-Air""\n",shellscript,tab
|
| 8 |
+
7,2692370,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_32k_glm.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/glm/miles_hf_part_jsonl_32k_tokens/"" \\n--max-tokens-per-conversation 32768 \\n--tokenizer=""zai-org/GLM-4.5-Air""\n",shellscript,tab
|
| 9 |
+
8,2692574,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_8k_glm.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/glm/miles_hf_part_jsonl_8k_tokens/"" \\n--max-tokens-per-conversation 8192 \\n--tokenizer=""zai-org/GLM-4.5-Air""",shellscript,tab
|
| 10 |
+
9,2693059,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_4k_glm.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/glm/miles_hf_part_jsonl_4k_tokens/"" \\n--max-tokens-per-conversation 4096 \\n--tokenizer=""zai-org/GLM-4.5-Air""",shellscript,tab
|
| 11 |
+
10,2693413,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_128k_qwen.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/qwen/miles_hf_part_jsonl_128k_tokens/"" \\n--max-tokens-per-conversation 131072 \\n--tokenizer=""Qwen/Qwen3-8B""",shellscript,tab
|
| 12 |
+
11,2693582,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_32k_qwen.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/qwen/miles_hf_part_jsonl_32k_tokens/"" \\n--max-tokens-per-conversation 32768 \\n--tokenizer=""Qwen/Qwen3-8B""",shellscript,tab
|
| 13 |
+
12,2693926,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_16k_qwen.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/qwen/miles_hf_part_jsonl_16k_tokens/"" \\n--max-tokens-per-conversation 16384 \\n--tokenizer=""Qwen/Qwen3-8B""",shellscript,tab
|
| 14 |
+
13,2694092,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_8k_qwen.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/qwen/miles_hf_part_jsonl_4k_tokens/"" \\n--max-tokens-per-conversation 8192 \\n--tokenizer=""Qwen/Qwen3-8B""",shellscript,tab
|
| 15 |
+
14,2694252,"/home/franz.srambical/slurm/dev/franz/berlin/crowd-pilot/crowd_pilot_serializer/serialize_4k_qwen.sh",0,0,"./target/release/crowd-pilot-serialize \\n--csv-root=""/fast/project/HFMI_SynergyUnit/tab_model/data/hf_part_csv/"" \\n--output-dir=""/fast/project/HFMI_SynergyUnit/tab_model/data/qwen/miles_hf_part_jsonl_4k_tokens/"" \\n--max-tokens-per-conversation 4096 \\n--tokenizer=""Qwen/Qwen3-8B""",shellscript,tab
|
| 16 |
+
15,2694435,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --job-name=crowd_pilot_sglang\n#SBATCH --mem=400GB\n#SBATCH --qos=normal\n\nexport HF_HOME=/fast/project/HFMI_SynergyUnit/tab_model/franz/hf_home/\n\nsource /home/franz.srambical/crowd-pilot-serializer-legacy/.venv/bin/activate\nmodule load CUDA/12.8\n\nmodel_path=""zai-org/GLM-4.5-Air""\npython3 -m sglang.launch_server --model-path $model_path --host 0.0.0.0 --log-requests \\n --tp-size 8 \\n --tool-call-parser glm45 \\n --reasoning-parser glm45 \\n --speculative-algorithm EAGLE \\n --speculative-num-steps 3 \\n --speculative-eagle-topk 1 \\n --speculative-num-draft-tokens 4 \\n --mem-fraction-static 0.9",shellscript,tab
|
| 17 |
+
16,7718995,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",117,569,"#SBATCH --gres=gpu:4\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/tab_model/logs/franz/%x_%j.log\n#SBATCH --job-name=crowd_pilot_sglang\n#SBATCH --mem=400GB\n#SBATCH --qos=normal\n\nexport HF_HOME=/fast/project/HFMI_SynergyUnit/tab_model/franz/hf_home/\n\nsource /home/franz.srambical/crowd-pilot-serializer-legacy/.venv/bin/activate\nmodule load CUDA/12.8\n\nmodel_path=""zai-org/GLM-4.5-Air""\npython3 -m sglang.launch_server --model-path $model_path --host 0.0.0.0 --log-requests \\n --tp-size 4 \\n",shellscript,content
|
| 18 |
+
17,8012403,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 19 |
+
18,8013939,"TERMINAL",0,0,"",,terminal_focus
|
| 20 |
+
19,8013939,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"",shellscript,tab
|
| 21 |
+
20,8019269,"TERMINAL",0,0,"curl -fsSL https://claude.ai/install.sh | bash",,terminal_command
|
| 22 |
+
21,8019316,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 23 |
+
22,8046929,"TERMINAL",0,0,"Setting up Claude Code...\r\n",,terminal_output
|
| 24 |
+
23,8047734,"TERMINAL",0,0,"[?2026h[?25l\r\n[38;2;215;119;87mChecking installation status...[39m\r\n[?2026l[?25l",,terminal_output
|
| 25 |
+
24,8047856,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n[38;2;215;119;87mInstalling Claude Code native build latest...[39m\r\n[?2026l",,terminal_output
|
| 26 |
+
25,8058183,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n[38;2;215;119;87mSetting up launcher and shell integration...[39m\r\n[?2026l",,terminal_output
|
| 27 |
+
26,8058245,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n[38;2;78;186;101m✔ [1mClaude Code successfully installed![22m[39m\r\n\r\n [38;2;153;153;153mVersion: [38;2;215;119;87m2.0.76[39m\r\n\r\n [38;2;153;153;153mLocation: [38;2;255;255;255m~/.local/bin/claude[39m\r\n\r\n\r\n [38;2;153;153;153mNext: Run [38;2;215;119;87m[1mclaude --help[22m[38;2;153;153;153m to get started[39m\r\n[?2026l",,terminal_output
|
| 28 |
+
27,8060208,"TERMINAL",0,0,"[?2026h[?25h[?2026l[?25h\r\n✅ Installation complete!\r\n\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-serializer",,terminal_output
|
| 29 |
+
28,8146009,"TERMINAL",0,0,"claude",,terminal_command
|
| 30 |
+
29,8146009,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 31 |
+
30,8146681,"TERMINAL",0,0,"[2J[3J[H",,terminal_output
|
| 32 |
+
31,8146744,"TERMINAL",0,0,"[?2026h[38;2;215;119;87mWelcome to Claude Code [38;2;153;153;153mv2.0.76 [39m\r\n…………………………………………………………………………………………………………………………………………………………\r\n\r\n * █████▓▓░\r\n * ███▓░ ░░\r\n ░░░░░░ ███▓░\r\n ░░░ ░░░░░░░░░░ ███▓░\r\n ░░░░░░░░░░░░░░░░░░░ [1m*[22m ██▓░░ ▓\r\n ░▓▓███▓▓░\r\n * ░░░░\r\n ░░░░░░░░\r\n ░░░░░░░░░░░░░░░░\r\n [38;2;215;119;87m █████████ [39m [38;2;153;153;153m*[39m\r\n [38;2;215;119;87m██▄█████▄██[39m [1m*[22m\r\n [38;2;215;119;87m █████████ [39m *\r\n…………………[38;2;215;119;87m█ █ █ █[39m………………………………………………………………………………………………………………\r\n\r\n[?2026l[?25l[?2004h",,terminal_output
|
| 33 |
+
32,8147006,"TERMINAL",0,0,"[?2026h[?25l[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;153;153;153mTo change this later, run /theme[39m\r\n\r\n [38;2;177;185;249m❯[39m [38;2;153;153;153m1. [38;2;78;186;101mDark mode[39m [38;2;78;186;101m✔[39m\r\n [38;2;153;153;153m2. [39mLight mode\r\n [38;2;153;153;153m3. [39mDark mode (colorblind-friendly)\r\n [38;2;153;153;153m4. [39mLight mode (colorblind-friendly)\r\n [38;2;153;153;153m5. [39mDark mode (ANSI colors only)\r\n [38;2;153;153;153m6. [39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;248;248;242m[2m 1 [22m [38;2;102;217;239mfunction[38;2;166;226;46m greet[38;2;255;255;255m()[38;2;248;248;242m {[39m\r\n[48;2;61;1;0m[38;2;220;90;90m 2 -[38;2;248;248;242m console.log(""Hello, [48;2;92;2;0mWorld[48;2;61;1;0m!""); [39m[49m\r\n[48;2;2;40;0m[38;2;80;200;80m 2 +[38;2;248;248;242m [38;2;166;226;46mconsole[38;2;255;255;255m.[38;2;102;217;239mlog[38;2;255;255;255m([38;2;230;219;116m""Hello, [48;2;4;71;0mClaude[48;2;2;40;0m!""[38;2;255;255;255m)[38;2;248;248;242m; [39m[49m\r\n[38;2;248;248;242m[2m 3 [22m }[39m\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;153;153;153m Syntax theme: Monokai Extended (ctrl+t to disable)[39m\r\n[?2026l",,terminal_output
|
| 34 |
+
33,8152215,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;153;153;153mTo change this later, run /theme[39m\r\n\r\n [38;2;153;153;153m1. [38;2;78;186;101mDark mode[39m [38;2;78;186;101m✔[39m\r\n [38;2;177;185;249m❯[39m [38;2;153;153;153m2. [38;2;177;185;249mLight mode[39m\r\n [38;2;153;153;153m3. [39mDark mode (colorblind-friendly)\r\n [38;2;153;153;153m4. [39mLight mode (colorblind-friendly)\r\n [38;2;153;153;153m5. [39mDark mode (ANSI colors only)\r\n [38;2;153;153;153m6. [39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;248;248;242m[2m 1 [22m [38;2;102;217;239mfunction[38;2;166;226;46m greet[38;2;255;255;255m()[38;2;248;248;242m {[39m\r\n[48;2;61;1;0m[38;2;220;90;90m 2 -[38;2;248;248;242m console.log(""Hello, [48;2;92;2;0mWorld[48;2;61;1;0m!""); [39m[49m\r\n[48;2;2;40;0m[38;2;80;200;80m 2 +[38;2;248;248;242m [38;2;166;226;46mconsole[38;2;255;255;255m.[38;2;102;217;239mlog[38;2;255;255;255m([38;2;230;219;116m""Hello, [48;2;4;71;0mClaude[48;2;2;40;0m!""[38;2;255;255;255m)[38;2;248;248;242m; [39m[49m\r\n[38;2;248;248;242m[2m 3 [22m }[39m\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌���╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;153;153;153m Syntax theme: Monokai Extended (ctrl+t to disable)[39m\r\n[?2026l[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;102;102;102mTo change this later, run /theme[39m\r\n\r\n [38;2;102;102;102m1. [38;2;44;122;57mDark mode[39m [38;2;44;122;57m✔[39m\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m2. [38;2;87;105;247mLight mode[39m\r\n [38;2;102;102;102m3. [39mDark mode (colorblind-friendly)\r\n [38;2;102;102;102m4. [39mLight mode (colorblind-friendly)\r\n [38;2;102;102;102m5. [39mDark mode (ANSI colors only)\r\n [38;2;102;102;102m6. 
[39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;175;175;175m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;51;51;51m[2m 1 [22m [38;2;167;29;93mfunction[38;2;51;51;51m [38;2;121;93;163mgreet[38;2;51;51;51m() {[39m\r\n[48;2;255;220;220m[38;2;207;34;46m 2 -[38;2;51;51;51m console.log(""Hello, [48;2;255;199;199mWorld[48;2;255;220;220m!""); [39m[49m\r\n[48;2;220;255;220m[38;2;36;138;61m 2 +[38;2;51;51;51m [38;2;0;134;179mconsole[38;2;167;29;93m.[38;2;0;134;179mlog[38;2;51;51;51m([38;2;24;54;145m""Hello, [48;2;178;255;178mClaude[48;2;220;255;220m!""[38;2;51;51;51m); [39m[49m\r\n[38;2;51;51;51m[2m 3 [22m }[39m\r\n[2m[38;2;175;175;175m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;102;102;102m Syntax theme: GitHub (ctrl+t to disable)[39m\r\n[?2026l",,terminal_output
|
| 35 |
+
34,8152895,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;102;102;102mTo change this later, run /theme[39m\r\n\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m1. [38;2;44;122;57mDark mode[39m [38;2;44;122;57m✔[39m\r\n [38;2;102;102;102m2. [39mLight mode\r\n [38;2;102;102;102m3. [39mDark mode (colorblind-friendly)\r\n [38;2;102;102;102m4. [39mLight mode (colorblind-friendly)\r\n [38;2;102;102;102m5. [39mDark mode (ANSI colors only)\r\n [38;2;102;102;102m6. [39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;175;175;175m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;51;51;51m[2m 1 [22m [38;2;167;29;93mfunction[38;2;51;51;51m [38;2;121;93;163mgreet[38;2;51;51;51m() {[39m\r\n[48;2;255;220;220m[38;2;207;34;46m 2 -[38;2;51;51;51m console.log(""Hello, [48;2;255;199;199mWorld[48;2;255;220;220m!""); [39m[49m\r\n[48;2;220;255;220m[38;2;36;138;61m 2 +[38;2;51;51;51m [38;2;0;134;179mconsole[38;2;167;29;93m.[38;2;0;134;179mlog[38;2;51;51;51m([38;2;24;54;145m""Hello, [48;2;178;255;178mClaude[48;2;220;255;220m!""[38;2;51;51;51m); [39m[49m\r\n[38;2;51;51;51m[2m 3 [22m }[39m\r\n[2m[38;2;175;175;175m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;102;102;102m Syntax theme: GitHub (ctrl+t to disable)[39m\r\n[?2026l[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;153;153;153mTo change this later, run /theme[39m\r\n\r\n [38;2;177;185;249m❯[39m [38;2;153;153;153m1. [38;2;78;186;101mDark mode[39m [38;2;78;186;101m✔[39m\r\n [38;2;153;153;153m2. [39mLight mode\r\n [38;2;153;153;153m3. [39mDark mode (colorblind-friendly)\r\n [38;2;153;153;153m4. [39mLight mode (colorblind-friendly)\r\n [38;2;153;153;153m5. [39mDark mode (ANSI colors only)\r\n [38;2;153;153;153m6. [39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;248;248;242m[2m 1 [22m [38;2;102;217;239mfunction[38;2;166;226;46m greet[38;2;255;255;255m()[38;2;248;248;242m {[39m\r\n[48;2;61;1;0m[38;2;220;90;90m 2 -[38;2;248;248;242m console.log(""Hello, [48;2;92;2;0mWorld[48;2;61;1;0m!""); [39m[49m\r\n[48;2;2;40;0m[38;2;80;200;80m 2 +[38;2;248;248;242m [38;2;166;226;46mconsole[38;2;255;255;255m.[38;2;102;217;239mlog[38;2;255;255;255m([38;2;230;219;116m""Hello, [48;2;4;71;0mClaude[48;2;2;40;0m!""[38;2;255;255;255m)[38;2;248;248;242m; [39m[49m\r\n[38;2;248;248;242m[2m 3 [22m }[39m\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;153;153;153m Syntax theme: Monokai Extended (ctrl+t to disable)[39m\r\n[?2026l",,terminal_output
|
| 36 |
+
35,8153427,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;153;153;153mTo change this later, run /theme[39m\r\n\r\n [38;2;153;153;153m1. [38;2;78;186;101mDark mode[39m [38;2;78;186;101m✔[39m\r\n [38;2;177;185;249m❯[39m [38;2;153;153;153m2. [38;2;177;185;249mLight mode[39m\r\n [38;2;153;153;153m3. [39mDark mode (colorblind-friendly)\r\n [38;2;153;153;153m4. [39mLight mode (colorblind-friendly)\r\n [38;2;153;153;153m5. [39mDark mode (ANSI colors only)\r\n [38;2;153;153;153m6. [39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;248;248;242m[2m 1 [22m [38;2;102;217;239mfunction[38;2;166;226;46m greet[38;2;255;255;255m()[38;2;248;248;242m {[39m\r\n[48;2;61;1;0m[38;2;220;90;90m 2 -[38;2;248;248;242m console.log(""Hello, [48;2;92;2;0mWorld[48;2;61;1;0m!""); [39m[49m\r\n[48;2;2;40;0m[38;2;80;200;80m 2 +[38;2;248;248;242m [38;2;166;226;46mconsole[38;2;255;255;255m.[38;2;102;217;239mlog[38;2;255;255;255m([38;2;230;219;116m""Hello, [48;2;4;71;0mClaude[48;2;2;40;0m!""[38;2;255;255;255m)[38;2;248;248;242m; [39m[49m\r\n[38;2;248;248;242m[2m 3 [22m }[39m\r\n[2m[38;2;80;80;80m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;153;153;153m Syntax theme: Monokai Extended (ctrl+t to disable)[39m\r\n[?2026l[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n Let's get started.\r\n\r\n [1mChoose the text style that looks best with your terminal[22m\r\n [38;2;102;102;102mTo change this later, run /theme[39m\r\n\r\n [38;2;102;102;102m1. [38;2;44;122;57mDark mode[39m [38;2;44;122;57m✔[39m\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m2. [38;2;87;105;247mLight mode[39m\r\n [38;2;102;102;102m3. [39mDark mode (colorblind-friendly)\r\n [38;2;102;102;102m4. [39mLight mode (colorblind-friendly)\r\n [38;2;102;102;102m5. [39mDark mode (ANSI colors only)\r\n [38;2;102;102;102m6. 
[39mLight mode (ANSI colors only)\r\n\r\n[2m[38;2;175;175;175m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;51;51;51m[2m 1 [22m [38;2;167;29;93mfunction[38;2;51;51;51m [38;2;121;93;163mgreet[38;2;51;51;51m() {[39m\r\n[48;2;255;220;220m[38;2;207;34;46m 2 -[38;2;51;51;51m console.log(""Hello, [48;2;255;199;199mWorld[48;2;255;220;220m!""); [39m[49m\r\n[48;2;220;255;220m[38;2;36;138;61m 2 +[38;2;51;51;51m [38;2;0;134;179mconsole[38;2;167;29;93m.[38;2;0;134;179mlog[38;2;51;51;51m([38;2;24;54;145m""Hello, [48;2;178;255;178mClaude[48;2;220;255;220m!""[38;2;51;51;51m); [39m[49m\r\n[38;2;51;51;51m[2m 3 [22m }[39m\r\n[2m[38;2;175;175;175m╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌��╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌[39m[22m\r\n[38;2;102;102;102m Syntax theme: GitHub (ctrl+t to disable)[39m\r\n[?2026l",,terminal_output
|
| 37 |
+
36,8155999,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n\r\n [1mClaude Code can be used with your Claude subscription or billed based on API usage through your Console account.[22m\r\n\r\n Select login method:\r\n\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m1. [38;2;87;105;247mClaude account with subscription · [38;2;102;102;102mPro, Max, Team, or Enterprise[39m\r\n\r\n [38;2;102;102;102m2. [39mAnthropic Console account · [38;2;102;102;102mAPI usage billing[39m\r\n\r\n[?2026l",,terminal_output
|
| 38 |
+
37,8161287,"TERMINAL",0,0,"[?2026h[?25h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[?2026l",,terminal_output
|
| 39 |
+
38,8161343,"TERMINAL",0,0,"[?2026h[?25l[2K[1A[2K[G\r\n [38;2;0;0;0m·[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 40 |
+
39,8161513,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✢[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 41 |
+
40,8161549,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m*[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 42 |
+
41,8161671,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✶[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 43 |
+
42,8161792,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✻[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 44 |
+
43,8161935,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✽[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 45 |
+
44,8162210,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✻[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 46 |
+
45,8162277,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✶[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 47 |
+
46,8162407,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m*[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 48 |
+
47,8162515,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✢[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 49 |
+
48,8162647,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m·[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 50 |
+
49,8162908,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✢[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 51 |
+
50,8163002,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m*[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 52 |
+
51,8163118,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✶[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 53 |
+
52,8163244,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✻[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 54 |
+
53,8163372,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✽[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 55 |
+
54,8163620,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✻[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 56 |
+
55,8163729,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✶[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 57 |
+
56,8163848,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m*[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 58 |
+
57,8163968,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m✢[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 59 |
+
58,8164104,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;0;0;0m·[39m Opening browser to sign in…\r\n[?2026l",,terminal_output
|
| 60 |
+
59,8164318,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[G\r\n [38;2;102;102;102mBrowser didn't open? Use the url below to sign in:[39m\r\n\r\n]8;;https://claude.ai/oauth/authorize?code=true&client_id=9d1c250a-e61b-44d9-88ed-5944d1962f5e&response_type=code&redirect_uri=https%3A%2F%2Fconsole.anthropic.com%2Foauth%2Fcode%2Fcallback&scope=org%3Acreate_api_key+user%3Aprofile+user%3Ainference+user%3Asessions%3Aclaude_code&code_challenge=QvdXmPd1qBwBH9-gIluX9G4FlgqNnfvU-xnXopkZHro&code_challenge_method=S256&state=zkdlV0Jiqk_FQTHeyRArjJ6NVy8EKZ3GtqmwuRdUmAI[38;2;102;102;102mhttps://claude.ai/oauth/authorize?code=true&client_id=9d1c250a-e61b-44d9-88ed-5944d1962f5e&response_type=code&redirect_uri=https%3A%2F%2Fconsole.anthropic.com%2Foauth%2Fcode%2Fcallback&scope=org%3Acreate_api_key+user%3Aprofile+user%3Ainference+user%3Asessions%3Aclaude_code&code_challenge=QvdXmPd1qBwBH9-gIluX9G4FlgqNnfvU-xnXopkZHro&code_challenge_method=S256&state=zkdlV0Jiqk_FQTHeyRArjJ6NVy8EKZ3GtqmwuRdUmAI[39m]8;;\r\n\r\n\r\n Paste code here if prompted >\r\n[?2026l[?1004h",,terminal_output
|
| 61 |
+
60,8262211,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n [38;2;102;102;102mLogged in as nkalyanv@gmail.com[39m\r\n [38;2;44;122;57mLogin successful. Press [1mEnter[22m to continue…[39m\r\n[?2026l[?1004l",,terminal_output
|
| 62 |
+
61,8276279,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[G\r\n [1mSecurity notes:[22m\r\n\r\n Claude can make mistakes\r\n [38;2;102;102;102mYou should always review Claude's responses, especially when[39m\r\n [38;2;102;102;102mrunning code.[39m\r\n\r\n Due to prompt injection risks, only use it with code you trust\r\n [38;2;102;102;102mFor more details see:[39m\r\n ]8;;https://code.claude.com/docs/en/security[38;2;102;102;102mhttps://code.claude.com/docs/en/security[39m]8;;\r\n\r\n [38;2;87;105;247mPress [1mEnter[22m to continue…[39m\r\n[?2026l",,terminal_output
|
| 63 |
+
62,8285297,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n [1mUse Claude Code's terminal setup?[22m\r\n\r\n For the optimal coding experience, enable the recommended settings\r\n for your terminal: Shift+Enter for newlines\r\n\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m1. [38;2;87;105;247mYes, use recommended settings[39m\r\n [38;2;102;102;102m2. [39mNo, maybe later with /terminal-setup\r\n\r\n [38;2;102;102;102mEnter to confirm · Esc to skip[39m\r\n[?2026l",,terminal_output
|
| 64 |
+
63,8287919,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n [1mUse Claude Code's terminal setup?[22m\r\n\r\n For the optimal coding experience, enable the recommended settings\r\n for your terminal: Shift+Enter for newlines\r\n\r\n [38;2;102;102;102m1. [39mYes, use recommended settings\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m2. [38;2;87;105;247mNo, maybe later with /terminal-setup[39m\r\n\r\n [38;2;102;102;102mEnter to confirm · Esc to skip[39m\r\n[?2026l",,terminal_output
|
| 65 |
+
64,8288179,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n [1mUse Claude Code's terminal setup?[22m\r\n\r\n For the optimal coding experience, enable the recommended settings\r\n for your terminal: Shift+Enter for newlines\r\n\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m1. [38;2;87;105;247mYes, use recommended settings[39m\r\n [38;2;102;102;102m2. [39mNo, maybe later with /terminal-setup\r\n\r\n [38;2;102;102;102mEnter to confirm · Esc to skip[39m\r\n[?2026l",,terminal_output
|
| 66 |
+
65,8297721,"TERMINAL",0,0,"[?2026h[?25h[?2026l[2J[3J[H[?25h[?2004l[?2026h[?25l\r\n[38;2;150;108;30m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m\r\n [38;2;150;108;30m[1mDo you trust the files in this folder?[22m[39m\r\n\r\n [1m/fast/home/franz.srambical/crowd-pilot-serializer[22m\r\n\r\n Claude Code may read, write, or execute files contained in this directory. This can pose security risks, so only use files from trusted sources.\r\n\r\n ]8;;https://code.claude.com/docs/en/security[38;2;102;102;102mLearn more[39m]8;;\r\n\r\n [38;2;87;105;247m❯[39m [38;2;102;102;102m1. [38;2;87;105;247mYes, proceed[39m\r\n [38;2;102;102;102m2. [39mNo, exit\r\n\r\n [38;2;102;102;102mEnter to confirm · Esc to cancel[39m\r\n[?2026l[?25l[?2004h",,terminal_output
|
| 67 |
+
66,8320499,"TERMINAL",0,0,"[?2026h[?25h[?2026l[2J[3J[H[?25h[?2004l",,terminal_output
|
| 68 |
+
67,8320787,"TERMINAL",0,0,"[?25l[?2004h",,terminal_output
|
| 69 |
+
68,8321047,"TERMINAL",0,0,"[?25h[?2004l",,terminal_output
|
| 70 |
+
69,8321181,"TERMINAL",0,0,"[?2026h\r\n[38;2;215;119;87m╭─── Claude Code [38;2;102;102;102mv2.0.76[38;2;215;119;87m ────────────────────────────────────────────────────────────────────────────────────────────────╮[39m\r\n[38;2;215;119;87m│[39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m[1mTips for getting started[22m[39m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [1mWelcome back Kalyan![22m [2m[38;2;215;119;87m│[39m[22m Run /init to create a CLAUDE.md file with instructions for Claude [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [2m[38;2;215;119;87m│[39m[22m [2m[38;2;215;119;87m─────────────────────────────────────────────────────────────────[39m[22m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [38;2;0;0;0m *[38;2;173;216;230m ▐[48;2;0;0;0m▛███▜[49m▌[38;2;0;0;0m *[39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m[1mRecent activity[22m[39m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [38;2;0;0;0m*[38;2;173;216;230m ▝▜[48;2;0;0;0m█████[49m▛▘[38;2;0;0;0m *[39m [2m[38;2;215;119;87m│[39m[22m [38;2;102;102;102mNo recent activity[39m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [38;2;0;0;0m * [38;2;173;216;230m ▘▘ ▝▝ [38;2;0;0;0m*[39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [38;2;102;102;102mOpus 4.5 · Claude Max · nkalyanv@gmail.com's [39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [38;2;102;102;102mOrganization[39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m│[39m [38;2;102;102;102m/fast/home/franz.srambical/crowd-pilot-serializer[39m [2m[38;2;215;119;87m│[39m[22m [38;2;215;119;87m│[39m\r\n[38;2;215;119;87m╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯[39m\r\n\r\n [38;2;102;102;102mWelcome to Opus 4.5[39m\r\n[?25l\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [2mTry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l[?25l[?2004h[?1004h",,terminal_output
|
| 71 |
+
70,8321473,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [7mT[27m[2mry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 72 |
+
71,8330259,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [2mTry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 73 |
+
72,8344949,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G[38;2;71;130;200m╭───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮[39m\r\n[38;2;71;130;200m│[39m [38;2;215;119;87m✻ [39mWelcome to [1mClaude Code[22m for [38;2;71;130;200m[1mCursor[22m[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m [38;2;102;102;102minstalled extension v2.0.76[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m • Claude has context of [38;2;87;105;247m⧉ open files[39m and [38;2;87;105;247m⧉ selected lines[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m • Review Claude Code's changes [38;2;47;157;68m+11[39m [38;2;209;69;75m-22[39m in the comfort of your IDE [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m • Cmd+Esc[38;2;102;102;102m for Quick Launch[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m│[39m • Ctrl+Alt+K[38;2;102;102;102m to reference files or lines in your input[39m [38;2;71;130;200m│[39m\r\n[38;2;71;130;200m╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯[39m\r\n [38;2;102;102;102mPress Enter to continue[39m\r\n[?2026l[?1004l",,terminal_output
|
| 74 |
+
73,8475700,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [2mTry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l[?1004h[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [2mTry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m [38;2;102;102;102mChecking for updates[39m\r\n[?2026l[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [7mT[27m[2mry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m───────────────────────────────────────────────────────────────────��─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m [38;2;102;102;102mChecking for updates[39m\r\n[?2026l",,terminal_output
|
| 75 |
+
74,8476110,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [7mT[27m[2mry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 76 |
+
75,8492668,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [2mTry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 77 |
+
76,8494504,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",20,0,"",shellscript,selection_command
|
| 78 |
+
77,8494685,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",21,0,"",shellscript,selection_command
|
| 79 |
+
78,8503753,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [7mT[27m[2mry ""write a test for <filepath>""[22m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 80 |
+
79,8506798,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> e[7m [27m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n[?2026l",,terminal_output
|
| 81 |
+
80,8507164,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> ex[7m [27m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n[?2026l[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> exi[7m [27m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n[?2026l",,terminal_output
|
| 82 |
+
81,8507254,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> exit[7m [27m\r\n[2m[38;2;153;153;153m─��───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n[?2026l",,terminal_output
|
| 83 |
+
82,8507496,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[48;2;240;240;240m[38;2;0;0;0m> /exit [39m[49m\r\n[38;2;0;0;0m ⎿ Goodbye![39m\r\n\r\n[38;2;215;119;87m·[39m [38;2;215;119;87mDetermini[38;2;245;149;117mng…[38;2;215;119;87m [38;2;102;102;102m([1mesc[22m to interrupt)[39m\r\n\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n[38;2;102;102;102m> [39m[7m [27m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 84 |
+
83,8507563,"TERMINAL",0,0,"[?2026h[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[1A[2K[G\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n> [7m [27m\r\n[2m[38;2;153;153;153m─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────[39m[22m\r\n [38;2;102;102;102m? for shortcuts[39m\r\n[?2026l",,terminal_output
|
| 85 |
+
84,8508520,"TERMINAL",0,0,"[?25h[?1004l[?2026h[?25h[?2026l",,terminal_output
|
| 86 |
+
85,8508521,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/crowd-pilot-serializer",,terminal_output
|
| 87 |
+
86,10423572,"/home/franz.srambical/slurm/jobs/franz/berlin/crowd-pilot/start_sglang_server_glm4_5_air.sh",0,0,"",shellscript,tab
|
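
The rows added above all share the same eight-field CSV layout: a running event index, an elapsed time in milliseconds, the event source ("TERMINAL" or the path of the file being edited), two cursor/selection coordinates, the recorded text (terminal output including ANSI escape sequences, or edited text), an editor language id (empty for terminal events), and an event type such as terminal_output, selection_command, or tab. The sketch below shows one way such a source.csv could be read; the field names, the Event helper, and the millisecond interpretation of the timestamp are assumptions inferred only from the rows visible here, not an official schema.

import csv
from typing import Iterator, NamedTuple

class Event(NamedTuple):
    # Hypothetical field names, inferred only from the rows visible above.
    seq: int          # running event index within the recording
    elapsed_ms: int   # assumed: milliseconds since the recording started
    source: str       # "TERMINAL" or the path of the edited file
    line: int         # assumed: cursor/selection line
    column: int       # assumed: cursor/selection column
    text: str         # terminal output (ANSI escapes included) or edited text
    language: str     # editor language id, empty for terminal events
    kind: str         # e.g. terminal_output, selection_command, tab

def read_events(path: str) -> Iterator[Event]:
    """Yield one Event per well-formed row of a crowd-code source.csv."""
    with open(path, newline="", encoding="utf-8", errors="replace") as f:
        for row in csv.reader(f):
            if len(row) != 8:
                continue  # skip malformed or truncated rows
            try:
                yield Event(int(row[0]), int(row[1]), row[2],
                            int(row[3]), int(row[4]), row[5], row[6], row[7])
            except ValueError:
                continue  # e.g. a header row with non-numeric fields

if __name__ == "__main__":
    for ev in read_events("source.csv"):
        if ev.kind == "terminal_output":
            print(f"[{ev.elapsed_ms} ms] {len(ev.text)} chars of terminal output")

Replaying a session would then amount to iterating the events in order and, for terminal_output rows, writing ev.text to a terminal emulator that understands the embedded ANSI sequences.
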
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv
ADDED
The diff for this file is too large to render. See raw diff

1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5d37fb4d-73be-43f4-bdda-1c3c7db3bdf31752589529764-2025_07_15-16.25.37.55/source.csv
ADDED
The diff for this file is too large to render. See raw diff

1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-60545c4b-0ad0-4693-a006-70ea0695f2d01758208774932-2025_09_18-17.19.56.698/source.csv
ADDED
The diff for this file is too large to render. See raw diff