| Sequence (int64, 1–25.2k) | Time (int64, 1–858M) | File (stringclasses, 830 values) | RangeOffset (int64, 0–2.21M) | RangeLength (int64, 0–168k) | Text (stringlengths, 1–4.7M, ⌀) | Language (stringclasses, 20 values) | Type (stringclasses, 9 values) |
|---|---|---|---|---|---|---|---|
| 202 | 371,668 | TERMINAL | 0 | 0 | \r\n inflating: overfit_dir/oai_sample_seed69_7.npy \r\n inflating: overfit_dir/oai_sample_seed69_10.npy \r\n inflating: overfit_dir/oai_sample_seed69_6.npy \r\n inflating: overfit_dir/oai_sample_seed69_4.npy \r\n]0;mahajanm@node17: /usr/stud/mahajanm/Projects/jafar[?2004h(jafar) ]0;mahajanm@node17: ~/Projects/jafar[01;32mmahajanm@node17[00m:[01;34m~/Projects/jafar[00m$ | null | terminal_output |
| 203 | 374,057 | train_dynamics.py | 0 | 0 | null | python | tab |
| 204 | 374,637 | train_dynamics.py | 12,545 | 0 | null | python | selection_mouse |
| 205 | 374,642 | train_dynamics.py | 12,544 | 0 | null | python | selection_command |
| 206 | 376,538 | TERMINAL | 0 | 0 | unzip overfit_dir.zip | null | terminal_output |
| 207 | 376,785 | TERMINAL | 0 | 0 | mv overfit_dir overfit_dir_bak | null | terminal_output |
| 208 | 377,431 | TERMINAL | 0 | 0 | sh slurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch | null | terminal_output |
| 209 | 378,432 | TERMINAL | 0 | 0 | \r\n[?2004l\r#!/usr/bin/env bash\r\n\r\n#SBATCH --nodes=1\r\n#SBATCH --ntasks-per-node=1\r\n#SBATCH --time=48:00:00\r\n#SBATCH --partition=accelerated\r\n#SBATCH --cpus-per-task=5\r\n#SBATCH --gres=gpu:1\r\n#SBATCH --output=/storage/slurm/mahajanm/yoloruns/%x_%j.log\r\n#SBATCH --error=/storage/slurm/mahajanm/yoloruns/%x_%j.log\r\n#SBATCH --job-name=train_dynamics_overfit_sample_causal_actionspace-1\r\n\r\n# Log the sbatch script\r\ncat $0\r\n\r\nmodule unload mpi/openmpi/5.0\r\nmodule unload devel/cuda/12.4\r\n# source .venv/bin/activate\r\n\r\narray_records_dir=.\r\n\r\njob_name=$SLURM_JOB_NAME\r\nslurm_job_id=$SLURM_JOB_ID\r\n\r\nCHECKPOINT_DIR=/storage/user/mahajanm/Projects/world-modeling/checkpoints/causal/overfit-oai-sample-actionspace-1/$job_name/$slurm_job_id\r\nmkdir -p $CHECKPOINT_DIR\r\n\r\n# tokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\r\ntokenizer_ckpt_dir=/storage/user/mahajanm/Projects/world-modeling/checkpoints/tokenizer_ckpt\r\n\r\nenv \| grep SLURM\r\n\r\nsrun python train_dynamics.py \\r\n --save_ckpt \\r\n --num_steps=2000 \\r\n --warmup_steps=0 \\r\n --wsd_decay_steps=0 \\r\n --ckpt_dir $CHECKPOINT_DIR \\r\n --batch_size=1 \\r\n --init_lr=1e-4 \\r\n --max_lr=1e-4 \\r\n --log_image_interval=1000 \\r\n --num_latent_actions=1 \\r\n --log \\r\n --log_checkpoint_interval=1000 \\r\n --name=dynamics-causal-overfit-actionspace-1-$slurm_job_id \\r\n --tags dynamics causal overfit \\r\n --entity instant-uv \\r\n --project jafar \\r\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\r\n --data_dir $array_records_dir \\r\n --dyna_dim=128 \\r\n --dyna_num_blocks=2 \\r\n --dyna_num_heads=4\r\n slurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch: 16: module: not found\r\nslurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch: 17: module: not found\r\nSLURM_STEP_NODELIST=node17\r\nSLURM_JOB_USER=mahajanm\r\nSLURM_JOB_GPUS=0\r\nSLURM_JOBID=1393544\r\nSLURM_PTY_PORT=39621\r\nSLURM_JOB_QOS=stud\r\nSLURM_JOB_NUM_NODES=1\r\nSLURM_SRUN_COMM_PORT=42571\r\nSLURM_TASKS_PER_NODE=1\r\nSLURM_NTASKS_PER_NODE=1\r\nSLURM_TOPOLOGY_ADDR_PATTERN=node\r\nSLURM_PRIO_PROCESS=0\r\nSLURM_JOB_START_TIME=1753197754\r\nSLURM_JOB_CPUS_PER_NODE=5\r\nSLURM_JOB_NAME=interactive\r\nSLURM_JOB_GID=20909\r\nSLURM_CPUS_ON_NODE=5\r\nSLURM_PROCID=0\r\nSLURM_JOB_ACCOUNT=stud\r\nSLURM_SCRIPT_CONTEXT=prolog_task\r\nSLURM_CONF=/var/spool/slurmd/conf-cache/slurm.conf\r\nSLURM_STEP_LAUNCHER_PORT=42571\r\nSLURM_SUBMIT_HOST=atcremers51\r\nSLURM_MPI_TYPE=none\r\nSLURM_GPUS_ON_NODE=1\r\nSLURM_NODELIST=node17\r\nSLURM_NNODES=1\r\nSLURM_JOB_ID=1393544\r\nSLURMD_NODENAME=node17\r\nSLURM_OOM_KILL_STEP=0\r\nSLURM_JOB_NODELIST=node17\r\nSLURM_GTIDS=0\r\nSLURM_STEPID=4294967290\r\nSLURM_CPUS_PER_TASK=5\r\nSLURM_JOB_END_TIME=1753233754\r\nSLURM_STEP_NUM_NODES=1\r\nSLURM_TRES_PER_TASK=cpu=5\r\nSLURM_PTY_WIN_ROW=27\r\nSLURM_JOB_UID=7389\r\nSLURM_CLUSTER_NAME=inf9\r\nSLURM_STEP_TASKS_PER_NODE=1\r\nSLURM_LOCALID=0\r\nSLURM_JOB_PARTITION=NORMAL\r\nSLURM_LAUNCH_NODE_IPADDR=131.159.18.70\r\nSLURMD_DEBUG=2\r\nSLURM_TASK_PID=3978593\r\nSLURM_NTASKS=1\r\nSLURM_TOPOLOGY_ADDR=node17\r\nSLURM_NPROCS=1\r\nSLURM_STEP_NUM_TASKS=1\r\nSLURM_SRUN_COMM_HOST=131.159.18.70\r\nSLURM_SUBMIT_DIR=/usr/stud/mahajanm/Projects/jafar\r\nSLURM_PTY_WIN_COL=184\r\nSLURM_STEP_ID=4294967290\r\nSLURM_NODEID=0\r\n | null | terminal_output |
| 210 | 380,956 | TERMINAL | 0 | 0 | /usr/stud/mahajanm/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `param-dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<class 'jax.numpy.float32'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n/usr/stud/mahajanm/Projects/jafar/.venv/lib/python3.10/site-packages/tyro/_parsers.py:347: UserWarning: The field `dtype` is annotated with type `<class 'numpy.dtype'>`, but the default value `<class 'jax.numpy.bfloat16'>` has type `<class 'jax._src.numpy.scalar_types._ScalarMeta'>`. We'll try to handle this gracefully, but it may cause unexpected behavior.\r\n warnings.warn(message)\r\n | null | terminal_output |
| 211 | 383,652 | TERMINAL | 0 | 0 | 2025-07-22 17:28:08.770450: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n | null | terminal_output |
| 212 | 393,139 | TERMINAL | 0 | 0 | 2025-07-22 17:28:18.286614: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n | null | terminal_output |
| 213 | 411,650 | TERMINAL | 0 | 0 | 2025-07-22 17:28:36.833938: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n | null | terminal_output |
| 214 | 415,063 | TERMINAL | 0 | 0 | wandb: Currently logged in as: mihir-mahajan2002 (instant-uv) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin\r\n | null | terminal_output |
| 215 | 415,698 | TERMINAL | 0 | 0 | wandb: Tracking run with wandb version 0.19.11\r\nwandb: Run data is saved locally in /usr/stud/mahajanm/Projects/jafar/wandb/run-20250722_172840-i1muzv9u\r\nwandb: Run `wandb offline` to turn off syncing.\r\nwandb: Syncing run dynamics-causal-overfit-actionspace-1-1393544\r\nwandb: ⭐️ View project at https://wandb.ai/instant-uv/jafar\r\nwandb: 🚀 View run at https://wandb.ai/instant-uv/jafar/runs/i1muzv9u\r\n | null | terminal_output |
| 216 | 417,680 | TERMINAL | 0 | 0 | WARNING:absl:Missing metrics for step 146000\r\nERROR:absl:File /storage/user/mahajanm/Projects/world-modeling/checkpoints/tokenizer_ckpt/146000/metrics/metrics not found.\r\n | null | terminal_output |
| 217 | 434,806 | TERMINAL | 0 | 0 | 2025-07-22 17:28:59.967307: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-22 17:28:59.967720: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-22 17:28:59.969606: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-22 17:28:59.969632: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n2025-07-22 17:28:59.970576: W external/xla/xla/service/gpu/autotuning/dot_search_space.cc:200] All configs were filtered out because none of them sufficiently match the hints. Maybe the hints set does not contain a good representative set of valid configs?Working around this by using the full hints set instead.\r\n | null | terminal_output |
| 218 | 499,833 | TERMINAL | 0 | 0 | Running on 1 devices.\r\nCounting all components: ['tokenizer', 'lam', 'dynamics']\r\nParameter counts:\r\n{'tokenizer': 37989616, 'lam': 19349312, 'dynamics': 583168, 'total': 57922096}\r\nStep 0, loss: 9.480460166931152\r\nStep 1, loss: 9.174245834350586\r\nStep 2, loss: 8.904889106750488\r\nStep 3, loss: 8.671189308166504\r\nStep 4, loss: 8.477564811706543\r\nStep 5, loss: 8.312178611755371\r\nStep 6, loss: 8.167587280273438\r\nStep 7, loss: 8.042720794677734\r\nStep 8, loss: 7.934907913208008\r\nStep 9, loss: 7.845415115356445\r\nStep 10, loss: 7.766481399536133\r\nStep 11, loss: 7.697776794433594\r\nStep 12, loss: 7.638243675231934\r\nStep 13, loss: 7.584810733795166\r\nStep 14, loss: 7.536331653594971\r\nStep 15, loss: 7.491437911987305\r\nStep 16, loss: 7.448523044586182\r\nStep 17, loss: 7.406974792480469\r\nStep 18, loss: 7.368091583251953\r\nStep 19, loss: 7.33169412612915\r\nStep 20, loss: 7.297691345214844\r\nStep 21, loss: 7.267172336578369\r\nStep 22, loss: 7.239794731140137\r\nStep 23, loss: 7.214932441711426\r\nStep 24, loss: 7.191597938537598\r\nStep 25, loss: 7.168027400970459\r\nStep 26, loss: 7.145496845245361\r\nStep 27, loss: 7.123675346374512\r\nStep 28, loss: 7.102673530578613\r\nStep 29, loss: 7.083183288574219\r\nStep 30, loss: 7.06398344039917\r\nStep 31, loss: 7.045696258544922\r\nStep 32, loss: 7.027586460113525\r\nStep 33, loss: 7.009341239929199\r\nStep 34, loss: 6.9913811683654785\r\nStep 35, loss: 6.974061012268066\r\nStep 36, loss: 6.957348346710205\r\nStep 37, loss: 6.941093921661377\r\nStep 38, loss: 6.926098823547363\r\nStep 39, loss: 6.91159725189209\r\nStep 40, loss: 6.896056175231934\r\nStep 41, loss: 6.881134033203125\r\nStep 42, loss: 6.8659515380859375\r\nStep 43, loss: 6.851076602935791\r\nStep 44, loss: 6.8365583419799805\r\nStep 45, loss: 6.822178363800049\r\nStep 46, loss: 6.808166027069092\r\nStep 47, loss: 6.794094085693359\r\nStep 48, loss: 6.780169486999512\r\nStep 49, loss: 6.765852928161621\r\nStep 50, loss: 6.751777648925781\r\nStep 51, loss: 6.737812519073486\r\nStep 52, loss: 6.724239349365234\r\nStep 53, loss: 6.710567951202393\r\nStep 54, loss: 6.697258949279785\r\nStep 55, loss: 6.68352746963501\r\nStep 56, loss: 6.6697540283203125\r\nStep 57, loss: 6.656202793121338\r\nStep 58, loss: 6.642975807189941\r\nStep 59, loss: 6.629417419433594\r\nStep 60, loss: 6.615745544433594\r\nStep 61, loss: 6.6020612716674805\r\nStep 62, loss: 6.589019298553467\r\nStep 63, loss: 6.5756611824035645\r\nStep 64, loss: 6.562169551849365\r\nStep 65, loss: 6.548956394195557\r\nStep 66, loss: 6.535773277282715\r\nStep 67, loss: 6.522866249084473\r\nStep 68, loss: 6.5099639892578125\r\nStep 69, loss: 6.497254848480225\r\nStep 70, loss: 6.484496593475342\r\nStep 71, loss: 6.471707820892334\r\nStep 72, loss: 6.459136486053467\r\nStep 73, loss: 6.446402549743652\r\nStep 74, loss: 6.433792591094971\r\nStep 75, loss: 6.421196460723877\r\nStep 76, loss: 6.408634662628174\r\nStep 77, loss: 6.3962836265563965\r\nStep 78, loss: 6.383990287780762\r\nStep 79, loss: 6.371838092803955\r\nStep 80, loss: 6.359347820281982\r\nStep 81, loss: 6.347039222717285\r\nStep 82, loss: 6.335073471069336\r\nStep 83, loss: 6.323063373565674\r\nStep 84, loss: 6.310975551605225\r\nStep 85, loss: 6.299100875854492\r\nStep 86, loss: 6.287184238433838\r\nStep 87, loss: 6.275509357452393\r\nStep 88, loss: 6.2639689445495605\r\nStep 89, loss: 6.252169609069824\r\nStep 90, loss: 6.240726470947266\r\nStep 91, loss: 6.229242324829102\r\nStep 92, loss: 6.217950344085693\r\nStep 93, loss: 6.206677436828613\r\nStep 94, loss: 6.19516134262085\r\nStep 95, loss: 6.183949947357178\r\nStep 96, loss: 6.172664642333984\r\nStep 97, loss: 6.161623001098633\r\nStep 98, loss: 6.150652885437012\r\nStep 99, loss: 6.139809608459473\r\nStep 100, loss: 6.128810882568359\r\nStep 101, loss: 6.118030548095703\r\nStep 102, loss: 6.107471466064453\r\nStep 103, loss: 6.096898078918457\r\nStep 104, loss: 6.086309909820557\r\nStep 105, loss: 6.075642108917236\r\nStep 106, loss: 6.065386772155762\r\nStep 107, loss: 6.055035591125488\r\nStep 108, loss: 6.044913291931152\r\nStep 109, loss: 6.034868240356445\r\nStep 110, loss: 6.024741172790527\r\nStep 111, loss: 6.014811038970947\r\nStep 112, loss: 6.005013942718506\r\nStep 113, loss: 5.995076656341553\r\nStep 114, loss: 5.985289573669434\r\nStep 115, loss: 5.975541114807129\r\nStep 116, loss: 5.965916633605957\r\nStep 117, loss: 5.956300735473633\r\nStep 118, loss: 5.946953773498535\r\nStep 119, loss: 5.937253952026367\r\nStep 120, loss: 5.927814960479736\r\nStep 121, loss: 5.918559551239014\r\nStep 122, loss: 5.909236907958984\r\nStep 123, loss: 5.900134086608887\r\nStep 124, loss: 5.890810489654541\r\nStep 125, loss: 5.881741046905518\r\nStep 126, loss: 5.872820854187012\r\nStep 127, loss: 5.863893985748291\r\nStep 128, loss: 5.855283260345459\r\nStep 129, loss: 5.8463640213012695\r\nStep 130, loss: 5.83745002746582\r\nStep 131, loss: 5.828755855560303\r\nStep 132, loss: 5.820085048675537\r\nStep 133, loss: 5.811432361602783\r\nStep 134, loss: 5.802737236022949\r\nStep 135, loss: 5.793982028961182\r\nStep 136, loss: 5.785389423370361\r\nStep 137, loss: 5.776768684387207\r\nStep 138, loss: 5.76821756362915\r\nStep 139, loss: 5.759739875793457\r\nStep 140, loss: 5.751182556152344\r\nStep 141, loss: 5.7426629066467285\r\nStep 142, loss: 5.7343268394470215\r\nStep 143, loss: 5.725825786590576\r\nStep 144, loss: 5.717185974121094\r\nStep 145, loss: 5.7085957527160645\r\nStep 146, loss: 5.700030326843262\r\nStep 147, loss: 5.691714763641357\r\nStep 148, loss: 5.683136463165283\r\nStep 149, loss: 5.67514705657959\r\nStep 150, loss: 5.666609764099121\r\nStep 151, loss: 5.6584272384643555\r\nStep 152, loss: 5.650120735168457\r\nStep 153, loss: 5.641846179962158\r\nStep 154, loss: 5.633699417114258\r\nStep 155, loss: 5.625447750091553\r\nStep 156, loss: 5.617218971252441\r\nStep 157, loss: 5.608856678009033\r\nStep 158, loss: 5.6006388664245605\r\nStep 159, loss: 5.592259407043457\r\nStep 160, loss: 5.584005355834961\r\nStep 161, loss: 5.575809478759766\r\nStep 162, loss: 5.567438125610352\r\nStep 163, loss: 5.559216499328613\r\nStep 164, loss: 5.550788402557373\r\nStep 165, loss: 5.542296886444092\r\nStep 166, loss: 5.5338945388793945\r\nStep 167, loss: 5.525486946105957\r\nStep 168, loss: 5.517175674438477\r\nStep 169, loss: 5.5086469650268555\r\nStep 170, loss: 5.500133514404297\r\nStep 171, loss: 5.491679668426514\r\nStep 172, loss: 5.483109474182129\r\nStep 173, loss: 5.474625110626221\r\nStep 174, loss: 5.4663519859313965\r\nStep 175, loss: 5.457508087158203\r\nStep 176, loss: 5.449170112609863\r\nStep 177, loss: 5.44079065322876\r\nStep 178, loss: 5.432651042938232\r\nStep 179, loss: 5.42384147644043\r\nStep 180, loss: 5.415524005889893\r\nStep 181, loss: 5.407171726226807\r\nStep 182, loss: 5.398496150970459\r\nStep 183, loss: 5.390052318572998\r\nStep 184, loss: 5.381461143493652\r\nStep 185, loss: 5.37297248840332\r\nStep 186, loss: 5.364108085632324\r\nStep 187, loss: 5.355648040771484\r\nStep 188, loss: 5.346733570098877\r\nStep 189, loss: 5.337948799133301\r\nStep 190, loss: 5.329388618469238\r\nStep 191, loss: 5.320555686950684\r\nStep 192, loss: 5.311867713928223\r\nStep 193, loss: 5.302826404571533\r\nStep 194, loss: 5.294132232666016\r\nStep 195, loss: 5.285187721252441\r\nStep 196, loss: 5.275977611541748\r\nStep 197, loss: 5.267266750335693\r\nStep 198, loss: 5.258528709411621\r\nStep 199, loss: 5.249538421630859\r\nStep 200, loss: 5.240363121032715\r\nStep 201, loss: 5.231479167938232\r\nStep 202, loss: 5.2226643562316895\r\nStep 203, loss: 5.213755130767822\r\nStep 204, loss: 5.204882621765137\r\nStep 205, loss: 5.195645332336426\r\nStep 206, loss: 5.18682861328125\r\nStep 207, loss: 5.177954196929932\r\nStep 208, loss: 5.1685404777526855\r\nStep 209, loss: 5.159674644470215\r\nStep 210, loss: 5.1507697105407715\r\nStep 211, loss: 5.142022609710693\r\nStep 212, loss: 5.132679462432861\r\nStep 213, loss: 5.123526573181152\r\nStep 214, loss: 5.115063190460205\r\nStep 215, loss: 5.106110572814941\r\nStep 216, loss: 5.096709728240967\r\nStep 217, loss: 5.087846755981445\r\nStep 218, loss: 5.078727722167969\r\nStep 219, loss: 5.069620132446289\r\nStep 220, loss: 5.060574054718018\r\nStep 221, loss: 5.051539421081543\r\nStep 222, loss: 5.042352676391602\r\nStep 223, loss: 5.033210277557373\r\nStep 224, loss: 5.0240092277526855\r\nStep 225, loss: 5.014781951904297\r\nStep 226, loss: 5.005751609802246\r\nStep 227, loss: 4.99630880355835\r\nStep 228, loss: 4.9872918128967285\r\nStep 229, loss: 4.9779744148254395\r\nStep 230, loss: 4.968775272369385\r\nStep 231, loss: 4.959146499633789\r\nStep 232, loss: 4.950018405914307\r\nStep 233, loss: 4.940718173980713\r\nStep 234, loss: 4.931290149688721\r\nStep 235, loss: 4.922303676605225\r\nStep 236, loss: 4.913100242614746\r\nStep 237, loss: 4.9039435386657715\r\n | null | terminal_output |
| 219 | 521,299 | TERMINAL | 0 | 0 | Step 238, loss: 4.894733905792236\r\nStep 239, loss: 4.885193824768066\r\nStep 240, loss: 4.876297950744629\r\nStep 241, loss: 4.867188453674316\r\nStep 242, loss: 4.85792350769043\r\nStep 243, loss: 4.84892463684082\r\nStep 244, loss: 4.839658260345459\r\nStep 245, loss: 4.830465793609619\r\nStep 246, loss: 4.821856498718262\r\nStep 247, loss: 4.812151908874512\r\nStep 248, loss: 4.803061008453369\r\nStep 249, loss: 4.794219970703125\r\nStep 250, loss: 4.785078525543213\r\nStep 251, loss: 4.776050090789795\r\nStep 252, loss: 4.7667927742004395\r\nStep 253, loss: 4.7578959465026855\r\nStep 254, loss: 4.748941421508789\r\nStep 255, loss: 4.739700794219971\r\nStep 256, loss: 4.730515003204346\r\nStep 257, loss: 4.721522808074951\r\nStep 258, loss: 4.712456226348877\r\nStep 259, loss: 4.703256607055664\r\nStep 260, loss: 4.694276809692383\r\nStep 261, loss: 4.684975624084473\r\nStep 262, loss: 4.675859451293945\r\nStep 263, loss: 4.666812419891357\r\nStep 264, loss: 4.657421112060547\r\nStep 265, loss: 4.6481781005859375\r\nStep 266, loss: 4.639326095581055\r\nStep 267, loss: 4.629735946655273\r\nStep 268, loss: 4.620504379272461\r\nStep 269, loss: 4.611030578613281\r\nStep 270, loss: 4.6018195152282715\r\nStep 271, loss: 4.5924224853515625\r\nStep 272, loss: 4.583019733428955\r\nStep 273, loss: 4.57393217086792\r\nStep 274, loss: 4.564557075500488\r\nStep 275, loss: 4.554717063903809\r\nStep 276, loss: 4.545714378356934\r\nStep 277, loss: 4.536378383636475\r\nStep 278, loss: 4.526472568511963\r\nStep 279, loss: 4.51693868637085\r\nStep 280, loss: 4.5073747634887695\r\nStep 281, loss: 4.498380184173584\r\nStep 282, loss: 4.488607406616211\r\nStep 283, loss: 4.47900915145874\r\nStep 284, loss: 4.4695234298706055\r\nStep 285, loss: 4.460263252258301\r\nStep 286, loss: 4.45058536529541\r\nStep 287, loss: 4.44098424911499\r\nStep 288, loss: 4.43133020401001\r\nStep 289, loss: 4.421866416931152\r\nStep 290, loss: 4.412137985229492\r\nStep 291, loss: 4.402849197387695\r\nStep 292, loss: 4.392948150634766\r\nStep 293, loss: 4.383704662322998\r\nStep 294, loss: 4.373988628387451\r\nStep 295, loss: 4.363873481750488\r\nStep 296, loss: 4.354661464691162\r\nStep 297, loss: 4.345125675201416\r\nStep 298, loss: 4.335028171539307\r\nStep 299, loss: 4.325948715209961\r\nStep 300, loss: 4.315857410430908\r\nStep 301, loss: 4.305981636047363\r\nStep 302, loss: 4.296720027923584\r\nStep 303, loss: 4.286660194396973\r\nStep 304, loss: 4.277304172515869\r\nStep 305, loss: 4.267578601837158\r\nStep 306, loss: 4.257894515991211\r\nStep 307, loss: 4.248012542724609\r\nStep 308, loss: 4.2386794090271\r\nStep 309, loss: 4.228699207305908\r\nStep 310, loss: 4.219088554382324\r\nStep 311, loss: 4.209353923797607\r\nStep 312, loss: 4.199688911437988\r\nStep 313, loss: 4.1900153160095215\r\nStep 314, loss: 4.1804022789001465\r\nStep 315, loss: 4.170781135559082\r\nStep 316, loss: 4.16085958480835\r\nStep 317, loss: 4.15128231048584\r\nStep 318, loss: 4.142023086547852\r\nStep 319, loss: 4.132155895233154\r\nStep 320, loss: 4.122425079345703\r\nStep 321, loss: 4.112797260284424\r\nStep 322, loss: 4.103492736816406\r\nStep 323, loss: 4.093623161315918\r\nStep 324, loss: 4.084210395812988\r\nStep 325, loss: 4.074493408203125\r\nStep 326, loss: 4.064899921417236\r\nStep 327, loss: 4.055786609649658\r\nStep 328, loss: 4.046080112457275\r\nStep 329, loss: 4.036588191986084\r\nStep 330, loss: 4.02688455581665\r\nStep 331, loss: 4.017064094543457\r\nStep 332, loss: 4.00753116607666\r\nStep 333, loss: 3.9982128143310547\r\nStep 334, loss: 3.988337755203247\r\nStep 335, loss: 3.9787819385528564\r\nStep 336, loss: 3.9692628383636475\r\nStep 337, loss: 3.9598398208618164\r\nStep 338, loss: 3.9500536918640137\r\nStep 339, loss: 3.94045090675354\r\nStep 340, loss: 3.9308156967163086\r\nStep 341, loss: 3.9215378761291504\r\nStep 342, loss: 3.912440299987793\r\nStep 343, loss: 3.9020469188690186\r\nStep 344, loss: 3.8925399780273438\r\nStep 345, loss: 3.8831520080566406\r\nStep 346, loss: 3.873824119567871\r\nStep 347, loss: 3.8638155460357666\r\nStep 348, loss: 3.8546125888824463\r\nStep 349, loss: 3.845461130142212\r\nStep 350, loss: 3.836083173751831\r\nStep 351, loss: 3.825646162033081\r\nStep 352, loss: 3.816453695297241\r\nStep 353, loss: 3.807070255279541\r\nStep 354, loss: 3.7974095344543457\r\nStep 355, loss: 3.7876882553100586\r\nStep 356, loss: 3.778428077697754\r\nStep 357, loss: 3.768634080886841\r\nStep 358, loss: 3.759089231491089\r\nStep 359, loss: 3.7496337890625\r\nStep 360, loss: 3.7398829460144043\r\nStep 361, loss: 3.7303173542022705\r\nStep 362, loss: 3.720412492752075\r\nStep 363, loss: 3.710479497909546\r\nStep 364, loss: 3.701172351837158\r\nStep 365, loss: 3.691666603088379\r\nStep 366, loss: 3.6819381713867188\r\nStep 367, loss: 3.672013998031616\r\nStep 368, loss: 3.662156105041504\r\nStep 369, loss: 3.6528818607330322\r\nStep 370, loss: 3.6430647373199463\r\nStep 371, loss: 3.632901191711426\r\nStep 372, loss: 3.623969554901123\r\nStep 373, loss: 3.6138224601745605\r\nStep 374, loss: 3.6040446758270264\r\nStep 375, loss: 3.5947954654693604\r\nStep 376, loss: 3.5852935314178467\r\nStep 377, loss: 3.575495481491089\r\nStep 378, loss: 3.5651743412017822\r\nStep 379, loss: 3.5559728145599365\r\nStep 380, loss: 3.546360492706299\r\nStep 381, loss: 3.536983013153076\r\nStep 382, loss: 3.52663254737854\r\nStep 383, loss: 3.517277717590332\r\nStep 384, loss: 3.5077924728393555\r\nStep 385, loss: 3.4979724884033203\r\nStep 386, loss: 3.4880731105804443\r\nStep 387, loss: 3.478489875793457\r\nStep 388, loss: 3.4685981273651123\r\nStep 389, loss: 3.45894193649292\r\nStep 390, loss: 3.449213743209839\r\nStep 391, loss: 3.4389641284942627\r\nStep 392, loss: 3.4291083812713623\r\nStep 393, loss: 3.4195480346679688\r\nStep 394, loss: 3.409362554550171\r\nStep 395, loss: 3.3995532989501953\r\nStep 396, loss: 3.3902361392974854\r\nStep 397, loss: 3.3808505535125732\r\nStep 398, loss: 3.3704354763031006\r\nStep 399, loss: 3.3609602451324463\r\nStep 400, loss: 3.3514811992645264\r\nStep 401, loss: 3.3416638374328613\r\nStep 402, loss: 3.332169532775879\r\nStep 403, loss: 3.3225908279418945\r\nStep 404, loss: 3.313539505004883\r\nStep 405, loss: 3.3031551837921143\r\nStep 406, loss: 3.294442892074585\r\nStep 407, loss: 3.284700632095337\r\nStep 408, loss: 3.274904489517212\r\nStep 409, loss: 3.2653958797454834\r\nStep 410, loss: 3.2560031414031982\r\nStep 411, loss: 3.246459722518921\r\nStep 412, loss: 3.2366511821746826\r\nStep 413, loss: 3.2274889945983887\r\nStep 414, loss: 3.217991352081299\r\nStep 415, loss: 3.2080228328704834\r\nStep 416, loss: 3.1982691287994385\r\nStep 417, loss: 3.1893160343170166\r\nStep 418, loss: 3.1801490783691406\r\nStep 419, loss: 3.1703829765319824\r\nStep 420, loss: 3.1605286598205566\r\nStep 421, loss: 3.1506621837615967\r\nStep 422, loss: 3.142115592956543\r\nStep 423, loss: 3.132230758666992\r\nStep 424, loss: 3.122910976409912\r\nStep 425, loss: 3.1128222942352295\r\nStep 426, loss: 3.1040496826171875\r\nStep 427, loss: 3.0945470333099365\r\nStep 428, loss: 3.0846142768859863\r\nStep 429, loss: 3.075422763824463\r\nStep 430, loss: 3.0660951137542725\r\nStep 431, loss: 3.056744337081909\r\nStep 432, loss: 3.047128438949585\r\nStep 433, loss: 3.0381076335906982\r\nStep 434, loss: 3.0277092456817627\r\nStep 435, loss: 3.0194830894470215\r\nStep 436, loss: 3.0096030235290527\r\nStep 437, loss: 3.0000758171081543\r\nStep 438, loss: 2.9905357360839844\r\nStep 439, loss: 2.982107639312744\r\nStep 440, loss: 2.97210431098938\r\nStep 441, loss: 2.962830066680908\r\nStep 442, loss: 2.9531683921813965\r\nStep 443, loss: 2.9438047409057617\r\nStep 444, loss: 2.934458017349243\r\nStep 445, loss: 2.9251554012298584\r\nStep 446, loss: 2.9156010150909424\r\nStep 447, loss: 2.9065234661102295\r\nStep 448, loss: 2.89675235748291\r\nStep 449, loss: 2.8871920108795166\r\nStep 450, loss: 2.8787176609039307\r\nStep 451, loss: 2.8691062927246094\r\nStep 452, loss: 2.8602843284606934\r\nStep 453, loss: 2.8498008251190186\r\nStep 454, loss: 2.8407888412475586\r\nStep 455, loss: 2.832183361053467\r\nStep 456, loss: 2.8221890926361084\r\nStep 457, loss: 2.813371419906616\r\nStep 458, loss: 2.8038954734802246\r\nStep 459, loss: 2.7948107719421387\r\nStep 460, loss: 2.7855441570281982\r\nStep 461, loss: 2.776766061782837\r\nStep 462, loss: 2.7675633430480957\r\nStep 463, loss: 2.7573256492614746\r\nStep 464, loss: 2.7492001056671143\r\nStep 465, loss: 2.739835739135742\r\nStep 466, loss: 2.7302825450897217\r\nStep 467, loss: 2.7224576473236084\r\nStep 468, loss: 2.7117347717285156\r\nStep 469, loss: 2.7038071155548096\r\nStep 470, loss: 2.694979190826416\r\nStep 471, loss: 2.6850578784942627\r\nStep 472, loss: 2.676320791244507\r\nStep 473, loss: 2.6673424243927\r\nStep 474, loss: 2.6582489013671875\r\nStep 475, loss: 2.6493124961853027\r\n | null | terminal_output |
| 220 | 542,670 | TERMINAL | 0 | 0 | Step 476, loss: 2.6405060291290283\r\nStep 477, loss: 2.6302950382232666\r\nStep 478, loss: 2.6221768856048584\r\nStep 479, loss: 2.6125497817993164\r\nStep 480, loss: 2.603834390640259\r\nStep 481, loss: 2.5952601432800293\r\nStep 482, loss: 2.5861666202545166\r\nStep 483, loss: 2.5766828060150146\r\nStep 484, loss: 2.5685880184173584\r\nStep 485, loss: 2.5581912994384766\r\nStep 486, loss: 2.550020694732666\r\nStep 487, loss: 2.5405330657958984\r\nStep 488, loss: 2.5311882495880127\r\nStep 489, loss: 2.522549629211426\r\nStep 490, loss: 2.5129308700561523\r\nStep 491, loss: 2.5040457248687744\r\nStep 492, loss: 2.4944605827331543\r\nStep 493, loss: 2.4854795932769775\r\nStep 494, loss: 2.475815534591675\r\nStep 495, loss: 2.4668164253234863\r\nStep 496, loss: 2.4571173191070557\r\nStep 497, loss: 2.447866201400757\r\nStep 498, loss: 2.439094305038452\r\nStep 499, loss: 2.4298014640808105\r\nStep 500, loss: 2.420858144760132\r\nStep 501, loss: 2.4103193283081055\r\nStep 502, loss: 2.401169538497925\r\nStep 503, loss: 2.3925647735595703\r\nStep 504, loss: 2.3834238052368164\r\nStep 505, loss: 2.3750646114349365\r\nStep 506, loss: 2.3658201694488525\r\nStep 507, loss: 2.356174945831299\r\nStep 508, loss: 2.347342014312744\r\nStep 509, loss: 2.3392539024353027\r\nStep 510, loss: 2.328644037246704\r\nStep 511, loss: 2.3218212127685547\r\nStep 512, loss: 2.3114004135131836\r\nStep 513, loss: 2.303480386734009\r\nStep 514, loss: 2.2939891815185547\r\nStep 515, loss: 2.2851333618164062\r\nStep 516, loss: 2.276604652404785\r\nStep 517, loss: 2.267066478729248\r\nStep 518, loss: 2.2589471340179443\r\nStep 519, loss: 2.249279260635376\r\nStep 520, loss: 2.2410078048706055\r\nStep 521, loss: 2.231337785720825\r\nStep 522, loss: 2.2231061458587646\r\nStep 523, loss: 2.2135684490203857\r\nStep 524, loss: 2.2049403190612793\r\nStep 525, loss: 2.196107864379883\r\nStep 526, loss: 2.1873886585235596\r\nStep 527, loss: 2.178758382797241\r\nStep 528, loss: 2.169591188430786\r\nStep 529, loss: 2.1614770889282227\r\nStep 530, loss: 2.151592969894409\r\nStep 531, loss: 2.1440787315368652\r\nStep 532, loss: 2.1354286670684814\r\nStep 533, loss: 2.1259350776672363\r\nStep 534, loss: 2.1175458431243896\r\nStep 535, loss: 2.1089038848876953\r\nStep 536, loss: 2.0997772216796875\r\nStep 537, loss: 2.0917932987213135\r\nStep 538, loss: 2.0829873085021973\r\nStep 539, loss: 2.074860095977783\r\nStep 540, loss: 2.065082550048828\r\nStep 541, loss: 2.057464599609375\r\nStep 542, loss: 2.0491132736206055\r\nStep 543, loss: 2.040846824645996\r\nStep 544, loss: 2.0321030616760254\r\nStep 545, loss: 2.02241849899292\r\nStep 546, loss: 2.0150506496429443\r\nStep 547, loss: 2.0061466693878174\r\nStep 548, loss: 1.9982203245162964\r\nStep 549, loss: 1.9893665313720703\r\nStep 550, loss: 1.9798681735992432\r\nStep 551, loss: 1.9730687141418457\r\nStep 552, loss: 1.9634298086166382\r\nStep 553, loss: 1.9554318189620972\r\nStep 554, loss: 1.9464129209518433\r\nStep 555, loss: 1.9378924369812012\r\nStep 556, loss: 1.9300504922866821\r\nStep 557, loss: 1.9213491678237915\r\nStep 558, loss: 1.9134242534637451\r\nStep 559, loss: 1.9036660194396973\r\nStep 560, loss: 1.896031379699707\r\nStep 561, loss: 1.8877346515655518\r\nStep 562, loss: 1.8795784711837769\r\nStep 563, loss: 1.8702267408370972\r\nStep 564, loss: 1.8619623184204102\r\nStep 565, loss: 1.8548887968063354\r\nStep 566, loss: 1.8458988666534424\r\nStep 567, loss: 1.8369609117507935\r\nStep 568, loss: 1.8285566568374634\r\nStep 569, loss: 1.8210422992706299\r\nStep 570, loss: 1.8119643926620483\r\nStep 571, loss: 1.8035266399383545\r\nStep 572, loss: 1.7958271503448486\r\nStep 573, loss: 1.7876445055007935\r\nStep 574, loss: 1.7792373895645142\r\nStep 575, loss: 1.7707502841949463\r\nStep 576, loss: 1.7626464366912842\r\nStep 577, loss: 1.754691481590271\r\nStep 578, loss: 1.7462551593780518\r\nStep 579, loss: 1.7379928827285767\r\nStep 580, loss: 1.7292394638061523\r\nStep 581, loss: 1.7222474813461304\r\nStep 582, loss: 1.7132426500320435\r\nStep 583, loss: 1.7050526142120361\r\nStep 584, loss: 1.696417212486267\r\nStep 585, loss: 1.688719391822815\r\nStep 586, loss: 1.6805790662765503\r\nStep 587, loss: 1.6722204685211182\r\nStep 588, loss: 1.6637897491455078\r\nStep 589, loss: 1.656758189201355\r\nStep 590, loss: 1.6476562023162842\r\nStep 591, loss: 1.6403577327728271\r\nStep 592, loss: 1.6345572471618652\r\nStep 593, loss: 1.6236876249313354\r\nStep 594, loss: 1.6165374517440796\r\nStep 595, loss: 1.611033320426941\r\nStep 596, loss: 1.600266933441162\r\nStep 597, loss: 1.594078779220581\r\nStep 598, loss: 1.5845835208892822\r\nStep 599, loss: 1.5804063081741333\r\nStep 600, loss: 1.5697143077850342\r\nStep 601, loss: 1.5637640953063965\r\nStep 602, loss: 1.5549819469451904\r\nStep 603, loss: 1.5479040145874023\r\nStep 604, loss: 1.5403248071670532\r\nStep 605, loss: 1.531739592552185\r\nStep 606, loss: 1.5247349739074707\r\nStep 607, loss: 1.5163322687149048\r\nStep 608, loss: 1.5093250274658203\r\nStep 609, loss: 1.5019617080688477\r\nStep 610, loss: 1.4946192502975464\r\nStep 611, loss: 1.4866294860839844\r\nStep 612, loss: 1.478313684463501\r\nStep 613, loss: 1.4709464311599731\r\nStep 614, loss: 1.4638079404830933\r\nStep 615, loss: 1.455183506011963\r\nStep 616, loss: 1.4491721391677856\r\nStep 617, loss: 1.4405052661895752\r\nStep 618, loss: 1.4325041770935059\r\nStep 619, loss: 1.426254391670227\r\nStep 620, loss: 1.418396234512329\r\nStep 621, loss: 1.4097704887390137\r\nStep 622, loss: 1.402566909790039\r\nStep 623, loss: 1.394473910331726\r\nStep 624, loss: 1.3870716094970703\r\nStep 625, loss: 1.378894567489624\r\nStep 626, loss: 1.3715211153030396\r\nStep 627, loss: 1.3639756441116333\r\nStep 628, loss: 1.357397437095642\r\nStep 629, loss: 1.3519854545593262\r\nStep 630, loss: 1.343829870223999\r\nStep 631, loss: 1.3343448638916016\r\nStep 632, loss: 1.328412652015686\r\nStep 633, loss: 1.3215456008911133\r\nStep 634, loss: 1.3130720853805542\r\nStep 635, loss: 1.3067958354949951\r\nStep 636, loss: 1.2988814115524292\r\nStep 637, loss: 1.2920023202896118\r\nStep 638, loss: 1.2851738929748535\r\nStep 639, loss: 1.2772091627120972\r\nStep 640, loss: 1.270416498184204\r\nStep 641, loss: 1.2633512020111084\r\nStep 642, loss: 1.2555394172668457\r\nStep 643, loss: 1.2508431673049927\r\nStep 644, loss: 1.2421696186065674\r\nStep 645, loss: 1.2357604503631592\r\nStep 646, loss: 1.2296723127365112\r\nStep 647, loss: 1.2216318845748901\r\nStep 648, loss: 1.2139776945114136\r\nStep 649, loss: 1.208685278892517\r\nStep 650, loss: 1.2003580331802368\r\nStep 651, loss: 1.1939011812210083\r\nStep 652, loss: 1.1880502700805664\r\nStep 653, loss: 1.1793875694274902\r\nStep 654, loss: 1.1740106344223022\r\nStep 655, loss: 1.1668822765350342\r\nStep 656, loss: 1.1599276065826416\r\nStep 657, loss: 1.1530556678771973\r\nStep 658, loss: 1.1463032960891724\r\nStep 659, loss: 1.1390328407287598\r\nStep 660, loss: 1.1325305700302124\r\nStep 661, loss: 1.1267162561416626\r\nStep 662, loss: 1.1200804710388184\r\nStep 663, loss: 1.11142098903656\r\nStep 664, loss: 1.1065025329589844\r\nStep 665, loss: 1.098965048789978\r\nStep 666, loss: 1.0916056632995605\r\nStep 667, loss: 1.0852556228637695\r\nStep 668, loss: 1.0806269645690918\r\nStep 669, loss: 1.0740106105804443\r\nStep 670, loss: 1.0665388107299805\r\nStep 671, loss: 1.0603067874908447\r\nStep 672, loss: 1.0545412302017212\r\nStep 673, loss: 1.0470287799835205\r\nStep 674, loss: 1.0418726205825806\r\nStep 675, loss: 1.0345330238342285\r\nStep 676, loss: 1.0275324583053589\r\nStep 677, loss: 1.0222522020339966\r\nStep 678, loss: 1.0161998271942139\r\nStep 679, loss: 1.007767915725708\r\nStep 680, loss: 1.0022755861282349\r\nStep 681, loss: 0.9957999587059021\r\nStep 682, loss: 0.9886702299118042\r\nStep 683, loss: 0.9833372235298157\r\nStep 684, loss: 0.9767733216285706\r\nStep 685, loss: 0.9698354601860046\r\nStep 686, loss: 0.9639562964439392\r\nStep 687, loss: 0.9569283723831177\r\nStep 688, loss: 0.9513558745384216\r\nStep 689, loss: 0.9463639259338379\r\nStep 690, loss: 0.9419379830360413\r\nStep 691, loss: 0.9360841512680054\r\nStep 692, loss: 0.9312412142753601\r\nStep 693, loss: 0.9228417873382568\r\nStep 694, loss: 0.9205953478813171\r\nStep 695, loss: 0.911032497882843\r\nStep 696, loss: 0.9065867066383362\r\nStep 697, loss: 0.8999131917953491\r\nStep 698, loss: 0.894045352935791\r\nStep 699, loss: 0.8881818652153015\r\nStep 700, loss: 0.8823570013046265\r\nStep 701, loss: 0.8777108788490295\r\nStep 702, loss: 0.8707617521286011\r\nStep 703, loss: 0.8676276206970215\r\nStep 704, loss: 0.8586929440498352\r\nStep 705, loss: 0.8560969829559326\r\nStep 706, loss: 0.8483167290687561\r\nStep 707, loss: 0.8432534337043762\r\nStep 708, loss: 0.8376733660697937\r\nStep 709, loss: 0.8318489789962769\r\nStep 710, loss: 0.8265501260757446\r\n | null | terminal_output |
| 221 | 563,569 | TERMINAL | 0 | 0 | Step 711, loss: 0.8203813433647156\r\nStep 712, loss: 0.815281331539154\r\nStep 713, loss: 0.8093656301498413\r\nStep 714, loss: 0.8039189577102661\r\nStep 715, loss: 0.7983381748199463\r\nStep 716, loss: 0.7914412617683411\r\nStep 717, loss: 0.7867292761802673\r\nStep 718, loss: 0.7814499139785767\r\nStep 719, loss: 0.7752460837364197\r\nStep 720, loss: 0.7703561186790466\r\nStep 721, loss: 0.7652120590209961\r\nStep 722, loss: 0.7591753005981445\r\nStep 723, loss: 0.7541587352752686\r\nStep 724, loss: 0.7495186924934387\r\nStep 725, loss: 0.7431130409240723\r\nStep 726, loss: 0.738324761390686\r\nStep 727, loss: 0.7326000332832336\r\nStep 728, loss: 0.728775680065155\r\nStep 729, loss: 0.7211100459098816\r\nStep 730, loss: 0.7169945240020752\r\nStep 731, loss: 0.7126502990722656\r\nStep 732, loss: 0.7071053385734558\r\nStep 733, loss: 0.7024368047714233\r\nStep 734, loss: 0.696080207824707\r\nStep 735, loss: 0.6906725764274597\r\nStep 736, loss: 0.6859578490257263\r\nStep 737, loss: 0.6807000041007996\r\nStep 738, loss: 0.6750681400299072\r\nStep 739, loss: 0.6701419949531555\r\nStep 740, loss: 0.6654138565063477\r\nStep 741, loss: 0.6601566076278687\r\nStep 742, loss: 0.6547061204910278\r\nStep 743, loss: 0.6499132513999939\r\nStep 744, loss: 0.6456824541091919\r\nStep 745, loss: 0.6397626996040344\r\nStep 746, loss: 0.6353244781494141\r\nStep 747, loss: 0.6297488212585449\r\nStep 748, loss: 0.6258376836776733\r\nStep 749, loss: 0.6202471256256104\r\nStep 750, loss: 0.6147438287734985\r\nStep 751, loss: 0.6106806993484497\r\nStep 752, loss: 0.6063467860221863\r\nStep 753, loss: 0.6015092134475708\r\nStep 754, loss: 0.6000180840492249\r\nStep 755, loss: 0.5919783115386963\r\nStep 756, loss: 0.5880365967750549\r\nStep 757, loss: 0.585043728351593\r\nStep 758, loss: 0.579394519329071\r\nStep 759, loss: 0.5743000507354736\r\nStep 760, loss: 0.5690972208976746\r\nStep 761, loss: 0.5666753053665161\r\nStep 762, loss: 0.5614392757415771\r\nStep 763, loss: 0.5562823414802551\r\nStep 764, loss: 0.552855372428894\r\nStep 765, loss: 0.5483986735343933\r\nStep 766, loss: 0.5435653328895569\r\nStep 767, loss: 0.5386010408401489\r\nStep 768, loss: 0.535403311252594\r\nStep 769, loss: 0.5312720537185669\r\nStep 770, loss: 0.5277209281921387\r\nStep 771, loss: 0.5228639841079712\r\nStep 772, loss: 0.518312394618988\r\nStep 773, loss: 0.5143700838088989\r\nStep 774, loss: 0.510115921497345\r\nStep 775, loss: 0.5059271454811096\r\nStep 776, loss: 0.5012122392654419\r\nStep 777, loss: 0.49865368008613586\r\nStep 778, loss: 0.49385520815849304\r\nStep 779, loss: 0.48963162302970886\r\nStep 780, loss: 0.48602399230003357\r\nStep 781, loss: 0.4810878336429596\r\nStep 782, loss: 0.47662225365638733\r\nStep 783, loss: 0.4729703664779663\r\nStep 784, loss: 0.46934133768081665\r\nStep 785, loss: 0.46498236060142517\r\nStep 786, loss: 0.4613810181617737\r\nStep 787, loss: 0.4574361741542816\r\nStep 788, loss: 0.45445942878723145\r\nStep 789, loss: 0.4504217207431793\r\nStep 790, loss: 0.4457048773765564\r\nStep 791, loss: 0.4432389736175537\r\nStep 792, loss: 0.4384344220161438\r\nStep 793, loss: 0.43453627824783325\r\nStep 794, loss: 0.4306294918060303\r\nStep 795, loss: 0.426582396030426\r\nStep 796, loss: 0.4241162836551666\r\nStep 797, loss: 0.41930893063545227\r\nStep 798, loss: 0.4151383340358734\r\nStep 799, loss: 0.4129972755908966\r\nStep 800, loss: 0.4086190164089203\r\nStep 801, loss: 0.4041345715522766\r\nStep 802, loss: 0.40122750401496887\r\nStep 803, loss: 0.3976961076259613\r\nStep 804, loss: 0.39345303177833557\r\nStep 805, loss: 0.39185962080955505\r\nStep 806, loss: 0.3899138867855072\r\nStep 807, loss: 0.38617050647735596\r\nStep 808, loss: 0.38049566745758057\r\nStep 809, loss: 0.3800064027309418\r\nStep 810, loss: 0.3761117458343506\r\nStep 811, loss: 0.3709666430950165\r\nStep 812, loss: 0.3697470724582672\r\nStep 813, loss: 0.36439675092697144\r\nStep 814, loss: 0.3634401261806488\r\nStep 815, loss: 0.3599107563495636\r\nStep 816, loss: 0.35557252168655396\r\nStep 817, loss: 0.35556721687316895\r\nStep 818, loss: 0.34895336627960205\r\nStep 819, loss: 0.34764376282691956\r\nStep 820, loss: 0.34617510437965393\r\nStep 821, loss: 0.3397122621536255\r\nStep 822, loss: 0.33907583355903625\r\nStep 823, loss: 0.33380836248397827\r\nStep 824, loss: 0.3312379717826843\r\nStep 825, loss: 0.3288106620311737\r\nStep 826, loss: 0.325488805770874\r\nStep 827, loss: 0.3230416774749756\r\nStep 828, loss: 0.32044318318367004\r\nStep 829, loss: 0.31666266918182373\r\nStep 830, loss: 0.31493043899536133\r\nStep 831, loss: 0.3113175630569458\r\nStep 832, loss: 0.30849435925483704\r\nStep 833, loss: 0.3068937659263611\r\nStep 834, loss: 0.30386847257614136\r\nStep 835, loss: 0.2996760904788971\r\nStep 836, loss: 0.2978828549385071\r\nStep 837, loss: 0.2940269708633423\r\nStep 838, loss: 0.2916935384273529\r\nStep 839, loss: 0.2890036702156067\r\nStep 840, loss: 0.2862553596496582\r\nStep 841, loss: 0.28316643834114075\r\nStep 842, loss: 0.2807336449623108\r\nStep 843, loss: 0.2793591022491455\r\nStep 844, loss: 0.27752894163131714\r\nStep 845, loss: 0.2729346752166748\r\nStep 846, loss: 0.2706596255302429\r\nStep 847, loss: 0.2683390974998474\r\nStep 848, loss: 0.2649962902069092\r\nStep 849, loss: 0.26284539699554443\r\nStep 850, loss: 0.26016294956207275\r\nStep 851, loss: 0.25724607706069946\r\nStep 852, loss: 0.25553545355796814\r\nStep 853, loss: 0.2524797320365906\r\nStep 854, loss: 0.25013959407806396\r\nStep 855, loss: 0.24743151664733887\r\nStep 856, loss: 0.24442772567272186\r\nStep 857, loss: 0.24259303510189056\r\nStep 858, loss: 0.23963691294193268\r\nStep 859, loss: 0.23761345446109772\r\nStep 860, loss: 0.2352614402770996\r\nStep 861, loss: 0.23318371176719666\r\nStep 862, loss: 0.23081550002098083\r\nStep 863, loss: 0.23004238307476044\r\nStep 864, loss: 0.22662876546382904\r\nStep 865, loss: 0.2235913723707199\r\nStep 866, loss: 0.22256506979465485\r\nStep 867, loss: 0.22038331627845764\r\nStep 868, loss: 0.21701249480247498\r\nStep 869, loss: 0.2155487984418869\r\nStep 870, loss: 0.21491599082946777\r\nStep 871, loss: 0.21212685108184814\r\nStep 872, loss: 0.2090834379196167\r\nStep 873, loss: 0.2066079080104828\r\nStep 874, loss: 0.20507854223251343\r\nStep 875, loss: 0.2032628208398819\r\nStep 876, loss: 0.1995580792427063\r\nStep 877, loss: 0.19908101856708527\r\nStep 878, loss: 0.19645556807518005\r\nStep 879, loss: 0.1949346661567688\r\nStep 880, loss: 0.19276867806911469\r\nStep 881, loss: 0.18997792899608612\r\nStep 882, loss: 0.1888667643070221\r\nStep 883, loss: 0.1870226413011551\r\nStep 884, loss: 0.18468765914440155\r\nStep 885, loss: 0.18309618532657623\r\nStep 886, loss: 0.18114908039569855\r\nStep 887, loss: 0.1793384999036789\r\nStep 888, loss: 0.17667841911315918\r\nStep 889, loss: 0.17582754790782928\r\nStep 890, loss: 0.17477206885814667\r\nStep 891, loss: 0.1727575808763504\r\nStep 892, loss: 0.17030151188373566\r\nStep 893, loss: 0.1687196046113968\r\nStep 894, loss: 0.16673330962657928\r\nStep 895, loss: 0.16532987356185913\r\nStep 896, loss: 0.1632375866174698\r\nStep 897, loss: 0.1611623615026474\r\nStep 898, loss: 0.16117429733276367\r\nStep 899, loss: 0.15819208323955536\r\nStep 900, loss: 0.15610043704509735\r\nStep 901, loss: 0.15601682662963867\r\nStep 902, loss: 0.153066486120224\r\nStep 903, loss: 0.1525007039308548\r\nStep 904, loss: 0.15008215606212616\r\nStep 905, loss: 0.14826105535030365\r\nStep 906, loss: 0.14709369838237762\r\nStep 907, loss: 0.14500559866428375\r\nStep 908, loss: 0.14406515657901764\r\nStep 909, loss: 0.14181755483150482\r\nStep 910, loss: 0.14063189923763275\r\nStep 911, loss: 0.13957737386226654\r\nStep 912, loss: 0.13771995902061462\r\nStep 913, loss: 0.135623961687088\r\nStep 914, loss: 0.13488002121448517\r\nStep 915, loss: 0.1338200867176056\r\nStep 916, loss: 0.1321789175271988\r\nStep 917, loss: 0.13146424293518066\r\nStep 918, loss: 0.13208389282226562\r\nStep 919, loss: 0.1290474236011505\r\nStep 920, loss: 0.12728333473205566\r\nStep 921, loss: 0.12628008425235748\r\nStep 922, loss: 0.1249026358127594\r\nStep 923, loss: 0.12538869678974152\r\nStep 924, loss: 0.12108120322227478\r\nStep 925, loss: 0.1216302141547203\r\nStep 926, loss: 0.11867577582597733\r\nStep 927, loss: 0.11953262984752655\r\nStep 928, loss: 0.11693321913480759\r\nStep 929, loss: 0.11638150364160538\r\nStep 930, loss: 0.11405260860919952\r\nStep 931, loss: 0.1135733500123024\r\nStep 932, loss: 0.11232151836156845\r\nStep 933, loss: 0.1113588809967041\r\nStep 934, loss: 0.10962262004613876\r\nStep 935, loss: 0.1084393635392189\r\nStep 936, loss: 0.10792259871959686\r\nStep 937, loss: 0.10561209172010422\r\nStep 938, loss: 0.10535231232643127\r\nStep 939, loss: 0.10442840307950974\r\nStep 940, loss: 0.10320015996694565\r\nStep 941, loss: 0.10162319988012314\r\n | null | terminal_output |
| 222 | 593,995 | TERMINAL | 0 | 0 | Step 942, loss: 0.1008194088935852\r\nStep 943, loss: 0.09929802268743515\r\nStep 944, loss: 0.09922713041305542\r\nStep 945, loss: 0.0976608544588089\r\nStep 946, loss: 0.09667724370956421\r\nStep 947, loss: 0.09602819383144379\r\nStep 948, loss: 0.09427038580179214\r\nStep 949, loss: 0.09377054870128632\r\nStep 950, loss: 0.09235028177499771\r\nStep 951, loss: 0.09160634130239487\r\nStep 952, loss: 0.09092758595943451\r\nStep 953, loss: 0.08997069299221039\r\nStep 954, loss: 0.088309146463871\r\nStep 955, loss: 0.08804954588413239\r\nStep 956, loss: 0.08700817078351974\r\nStep 957, loss: 0.08623076975345612\r\nStep 958, loss: 0.08560863137245178\r\nStep 959, loss: 0.08365897834300995\r\nStep 960, loss: 0.08328527212142944\r\nStep 961, loss: 0.08257988095283508\r\nStep 962, loss: 0.08099255710840225\r\nStep 963, loss: 0.08023347705602646\r\nStep 964, loss: 0.0790921226143837\r\nStep 965, loss: 0.07866410166025162\r\nStep 966, loss: 0.07775773853063583\r\nStep 967, loss: 0.07782191038131714\r\nStep 968, loss: 0.07706552743911743\r\nStep 969, loss: 0.07503318786621094\r\nStep 970, loss: 0.07446465641260147\r\nStep 971, loss: 0.07465951889753342\r\nStep 972, loss: 0.07266110926866531\r\nStep 973, loss: 0.07263903319835663\r\nStep 974, loss: 0.07183026522397995\r\nStep 975, loss: 0.07099255919456482\r\nStep 976, loss: 0.06958350539207458\r\nStep 977, loss: 0.06936709582805634\r\nStep 978, loss: 0.06806983053684235\r\nStep 979, loss: 0.06725488603115082\r\nStep 980, loss: 0.06663209199905396\r\nStep 981, loss: 0.06566598266363144\r\nStep 982, loss: 0.06491410732269287\r\nStep 983, loss: 0.06428809463977814\r\nStep 984, loss: 0.06356185674667358\r\nStep 985, loss: 0.06272757053375244\r\nStep 986, loss: 0.06217228248715401\r\nStep 987, loss: 0.06150488182902336\r\nStep 988, loss: 0.06079510971903801\r\nStep 989, loss: 0.059915702790021896\r\nStep 990, loss: 0.05914614349603653\r\nStep 991, loss: 0.05854793265461922\r\nStep 992, loss: 0.058074913918972015\r\nStep 993, loss: 0.05741223692893982\r\nStep 994, loss: 0.05708782374858856\r\nStep 995, loss: 0.05635122209787369\r\nStep 996, loss: 0.0550115630030632\r\nStep 997, loss: 0.05477910861372948\r\nStep 998, loss: 0.05405436083674431\r\nStep 999, loss: 0.0537639856338501\r\nSaved checkpoint at step 1000\r\nStep 1000, loss: 0.05402640253305435\r\nStep 1001, loss: 0.05313023179769516\r\nStep 1002, loss: 0.05203690379858017\r\nStep 1003, loss: 0.05204297974705696\r\nStep 1004, loss: 0.050889063626527786\r\nStep 1005, loss: 0.05082974582910538\r\nStep 1006, loss: 0.0496949702501297\r\nStep 1007, loss: 0.049189139157533646\r\nStep 1008, loss: 0.04872356727719307\r\nStep 1009, loss: 0.047716084867715836\r\nStep 1010, loss: 0.0476408414542675\r\nStep 1011, loss: 0.04675651341676712\r\nStep 1012, loss: 0.04638819769024849\r\nStep 1013, loss: 0.046266332268714905\r\nStep 1014, loss: 0.04523168504238129\r\nStep 1015, loss: 0.04490895941853523\r\nStep 1016, loss: 0.04396004602313042\r\nStep 1017, loss: 0.04407587647438049\r\nStep 1018, loss: 0.043728992342948914\r\nStep 1019, loss: 0.043021876364946365\r\nStep 1020, loss: 0.042737677693367004\r\nStep 1021, loss: 0.0419435054063797\r\nStep 1022, loss: 0.041405536234378815\r\nStep 1023, loss: 0.041298799216747284\r\nStep 1024, loss: 0.04074754938483238\r\nStep 1025, loss: 0.040655750781297684\r\nStep 1026, loss: 0.03989291563630104\r\nStep 1027, loss: 0.03932521864771843\r\nStep 1028, loss: 0.03926165774464607\r\nStep 1029, loss: 0.038410648703575134\r\nStep 1030, loss: 0.037977274507284164\r\nStep 1031, loss: 0.038197796791791916\r\nStep 1032, loss: 0.03719856217503548\r\nStep 1033, loss: 0.03733721747994423\r\nStep 1034, loss: 0.036742616444826126\r\nStep 1035, loss: 0.03588154539465904\r\nStep 1036, loss: 0.03571731969714165\r\nStep 1037, loss: 0.035179950296878815\r\nStep 1038, loss: 0.034926462918519974\r\nStep 1039, loss: 0.034345295280218124\r\nStep 1040, loss: 0.034014929085969925\r\nStep 1041, loss: 0.03364904969930649\r\nStep 1042, loss: 0.0334702730178833\r\nStep 1043, loss: 0.03299660235643387\r\nStep 1044, loss: 0.03263625130057335\r\nStep 1045, loss: 0.03241368383169174\r\nStep 1046, loss: 0.031813930720090866\r\nStep 1047, loss: 0.03131252899765968\r\nStep 1048, loss: 0.03151620179414749\r\nStep 1049, loss: 0.03115146979689598\r\nStep 1050, loss: 0.03134384751319885\r\nStep 1051, loss: 0.030595770105719566\r\nStep 1052, loss: 0.030392158776521683\r\nStep 1053, loss: 0.029602892696857452\r\nStep 1054, loss: 0.030338317155838013\r\nStep 1055, loss: 0.028732774779200554\r\nStep 1056, loss: 0.029287779703736305\r\nStep 1057, loss: 0.02844015695154667\r\nStep 1058, loss: 0.028144344687461853\r\nStep 1059, loss: 0.027715185657143593\r\nStep 1060, loss: 0.02736845798790455\r\nStep 1061, loss: 0.027427783235907555\r\nStep 1062, loss: 0.02679455652832985\r\nStep 1063, loss: 0.02666320838034153\r\nStep 1064, loss: 0.026576874777674675\r\nStep 1065, loss: 0.025922944769263268\r\nStep 1066, loss: 0.02580343186855316\r\nStep 1067, loss: 0.025831155478954315\r\nStep 1068, loss: 0.024951910600066185\r\nStep 1069, loss: 0.02512134425342083\r\nStep 1070, loss: 0.024761002510786057\r\nStep 1071, loss: 0.024328792467713356\r\nStep 1072, loss: 0.02458091266453266\r\nStep 1073, loss: 0.0236649252474308\r\nStep 1074, loss: 0.02398708090186119\r\nStep 1075, loss: 0.023102357983589172\r\nStep 1076, loss: 0.023069025948643684\r\nStep 1077, loss: 0.022844064980745316\r\nStep 1078, loss: 0.02251344732940197\r\nStep 1079, loss: 0.022413654252886772\r\nStep 1080, loss: 0.022110341116786003\r\nStep 1081, loss: 0.02200206182897091\r\nStep 1082, loss: 0.02136981301009655\r\nStep 1083, loss: 0.021670987829566002\r\nStep 1084, loss: 0.021102994680404663\r\nStep 1085, loss: 0.02108168788254261\r\nStep 1086, loss: 0.020908765494823456\r\nStep 1087, loss: 0.020433593541383743\r\nStep 1088, loss: 0.020202046260237694\r\nStep 1089, loss: 0.020047444850206375\r\nStep 1090, loss: 0.019758647307753563\r\nStep 1091, loss: 0.019615482538938522\r\nStep 1092, loss: 0.01929626613855362\r\nStep 1093, loss: 0.01891310140490532\r\nStep 1094, loss: 0.018891051411628723\r\nStep 1095, loss: 0.01885131187736988\r\nStep 1096, loss: 0.01856960542500019\r\nStep 1097, loss: 0.018240343779325485\r\nStep 1098, loss: 0.017955027520656586\r\nStep 1099, loss: 0.017796391621232033\r\nStep 1100, loss: 0.017860697582364082\r\nStep 1101, loss: 0.01747225411236286\r\nStep 1102, loss: 0.01713988557457924\r\nStep 1103, loss: 0.017180515453219414\r\nStep 1104, loss: 0.017039211466908455\r\nStep 1105, loss: 0.01669398695230484\r\nStep 1106, loss: 0.016584506258368492\r\nStep 1107, loss: 0.01622665487229824\r\nStep 1108, loss: 0.016016732901334763\r\nStep 1109, loss: 0.016028376296162605\r\nStep 1110, loss: 0.01570427417755127\r\nStep 1111, loss: 0.015501257963478565\r\nStep 1112, loss: 0.015613934956490993\r\nStep 1113, loss: 0.015449399128556252\r\nStep 1114, loss: 0.015045001171529293\r\nStep 1115, loss: 0.015044390223920345\r\nStep 1116, loss: 0.014772292226552963\r\nStep 1117, loss: 0.014750405214726925\r\nStep 1118, loss: 0.01463217195123434\r\nStep 1119, loss: 0.014220980927348137\r\nStep 1120, loss: 0.01435190811753273\r\nStep 1121, loss: 0.014992658980190754\r\nStep 1122, loss: 0.015058763325214386\r\nStep 1123, loss: 0.013814738020300865\r\nStep 1124, loss: 0.014502659440040588\r\nStep 1125, loss: 0.013513303361833096\r\nStep 1126, loss: 0.014175111427903175\r\nStep 1127, loss: 0.013332690112292767\r\nStep 1128, loss: 0.013805527240037918\r\nStep 1129, loss: 0.013089513406157494\r\nStep 1130, loss: 0.013423136435449123\r\nStep 1131, loss: 0.012712549418210983\r\nStep 1132, loss: 0.01310004387050867\r\nStep 1133, loss: 0.012440760619938374\r\nStep 1134, loss: 0.01279283594340086\r\nStep 1135, loss: 0.012165340594947338\r\nStep 1136, loss: 0.012258267030119896\r\nStep 1137, loss: 0.012086736969649792\r\nStep 1138, loss: 0.012027643620967865\r\nStep 1139, loss: 0.011731420643627644\r\nStep 1140, loss: 0.011787042021751404\r\nStep 1141, loss: 0.011482713744044304\r\nStep 1142, loss: 0.011478434316813946\r\nStep 1143, loss: 0.011350249871611595\r\nStep 1144, loss: 0.011211170814931393\r\nStep 1145, loss: 0.0110248401761055\r\nStep 1146, loss: 0.010958909057080746\r\nStep 1147, loss: 0.01079494971781969\r\nStep 1148, loss: 0.010710245929658413\r\nStep 1149, loss: 0.010534513741731644\r\nStep 1150, loss: 0.010494522750377655\r\nStep 1151, loss: 0.010368077084422112\r\nStep 1152, loss: 0.010263820178806782\r\nStep 1153, loss: 0.010247279889881611\r\nStep 1154, loss: 0.010061112232506275\r\nStep 1155, loss: 0.009866724722087383\r\nStep 1156, loss: 0.009970852173864841\r\nStep 1157, loss: 0.009857166558504105\r\nStep 1158, loss: 0.009639806114137173\r\nStep 1159, loss: 0.009560294449329376\r\nStep 1160, loss: 0.009501682594418526\r\n | null | terminal_output |
| 1 | 4 | train_dynamics.py | 0 | 0 | from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = "wsd" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = ""\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = ""\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n use_maskgit: bool = False\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = ""\n project: str = ""\n name: str = "train_dynamics"\n tags: list[str] = field(default_factory=lambda: ["dynamics"])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = ""\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = ""\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """Compute masked dynamics loss"""\n inputs["videos"] = inputs["videos"].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={"params": inputs["rng"], "dropout": inputs["dropout_rng"]},\n )\n mask = outputs["mask"]\n outputs["token_logits"] = outputs["token_logits"].astype(jnp.float32)\n outputs["recon"] = outputs["recon"].astype(jnp.float32)\n logits = outputs["token_logits"]\n targets = outputs["video_tokens"]\n\n # if not args.use_maskgit:\n # logits = outputs["token_logits"][:, :, :-1]\n # targets = outputs["video_tokens"][:, :, 1:]\n # mask = outputs["mask"][:, :, 1:] \n\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n logits, targets\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = logits.argmax(-1) == targets\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(logits)\n gt = inputs["videos"].clip(0, 1).reshape(-1, *inputs["videos"].shape[2:])\n recon = outputs["recon"].clip(0, 1).reshape(-1, *outputs["recon"].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs["lam_indices"]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs["video_tokens"]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=logits.max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs["recon"], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """Update state and compute metrics"""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics["gradients_std/"] = jax.tree.map(\n lambda x: x.std(), grads["params"]["dynamics"]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == "__main__":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError("No JAX devices found.")\n print(f"Running on {num_devices} devices.")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f"Global batch size {args.batch_size} must be divisible by "\n f"number of devices {num_devices}."\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n use_maskgit=args.use_maskgit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n "entity": args.entity,\n "project": args.project,\n "name": args.name,\n "tags": args.tags,\n "group": "debug",\n "config": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n "id": args.wandb_id,\n "resume": "allow",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({"model_param_count": param_counts})\n\n print("Parameter counts:")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=("data",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n "model_state", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n "model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n # handler_registry.add("dataloader_state", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n # handler_registry.add("dataloader_state", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith(".array_record")\n ]\n # grain_dataloader = get_dataloader(\n # array_record_files,\n # args.seq_len,\n # # NOTE: We deliberately pass the global batch size\n # # The dataloader shards the dataset across all processes\n # args.batch_size,\n # *image_shape,\n # num_workers=8,\n # prefetch_buffer_size=1,\n # seed=args.seed,\n # )\n # initial_state = grain_dataloader._create_initial_state()\n # grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n pass\n # # Restore full dynamics model\n # abstract_train_state = jax.tree_util.tree_map(\n # ocp.utils.to_shape_dtype_struct, train_state\n # )\n # restored = checkpoint_manager.restore(\n # checkpoint_manager.latest_step(),\n # args=ocp.args.Composite(\n # model_state=ocp.args.StandardRestore(abstract_train_state),\n # dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n # ),\n # )\n # train_state = restored["model_state"]\n # grain_iterator = restored["dataloader_state"]\n # step = checkpoint_manager.latest_step() or 0\n # print(f"Restored dataloader and model state from step {step}")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n # dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n # for videos in dataloader:\n # for i in range(videos.shape[0]):\n # video_i = videos[i:i+1] # shape (1, T, H, W, C)\n # np.save(f"overfit_dir/oai_sample_seed69_{i}.npy", video_i)\n # jax.debug.breakpoint()\n videos = 
np.load("overfit_dir/oai_sample_seed69_1.npy") # *255.\n # videos = videos.astype(np.uint8)\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n while True:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics["lr"] = lr_schedule(step)\n print(f"Step {step}, loss: {loss}")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n "loss": loss,\n "step": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs["videos"][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, "t h w c -> h (t w) c"\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n # dataloader_state=grain.checkpoint.CheckpointSave(\n # grain_iterator\n # ),\n ),\n )\n print(f"Saved checkpoint at step {step}")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n
|
python
|
tab
|
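The dynamics_loss_fn in the train_dynamics.py snapshot above averages cross-entropy only over masked token positions. A minimal, self-contained sketch of that masking pattern, with illustrative shapes and random stand-in data (none of these tensor values come from the repo):

import jax
import jax.numpy as jnp
import optax

# Illustrative shapes: batch=2, frames=3, patches=4, vocab=8.
key = jax.random.PRNGKey(0)
k1, k2, k3 = jax.random.split(key, 3)
logits = jax.random.normal(k1, (2, 3, 4, 8))        # (B, T, N, vocab)
targets = jax.random.randint(k2, (2, 3, 4), 0, 8)   # (B, T, N) integer tokens
mask = jax.random.bernoulli(k3, 0.5, (2, 3, 4))     # True where a token was masked

ce = optax.softmax_cross_entropy_with_integer_labels(logits, targets)  # (B, T, N)
masked_ce = (mask * ce).sum() / mask.sum()          # mean loss over masked positions only
masked_acc = (mask * (logits.argmax(-1) == targets)).sum() / mask.sum()
print(masked_ce, masked_acc)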
2
| 4,945
|
slurm/jobs/mihir/horeka/yolo-runs/sampling.sh
| 0
| 0
|
\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/storage/user/mahajanm/Projects/world-modeling/checkpoints/tokenizer_ckpt\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --seq_len=2 \\n --num_latent_actions=1 \\n --start_frame=0 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n
|
shellscript
|
tab
|
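The srun line in the launcher above passes its flags to sample.py, where tyro parses them into the Args dataclass. A trimmed-down sketch of the same pattern (a hypothetical three-field dataclass, not the full Args):

from dataclasses import dataclass

import tyro

@dataclass
class Args:
    checkpoint: str = ""
    dyna_dim: int = 512
    seq_len: int = 16

# Equivalent to: python sample.py --checkpoint /path/to/ckpt --dyna_dim=128 --seq_len=2
args = tyro.cli(Args, args=["--checkpoint", "/path/to/ckpt", "--dyna_dim", "128", "--seq_len", "2"])
print(args)  # Args(checkpoint='/path/to/ckpt', dyna_dim=128, seq_len=2)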
3
| 8,021
|
sample.py
| 0
| 0
|
from dataclasses import dataclass\nfrom typing import Optional\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom flax.training.train_state import TrainState\nimport grain\nimport orbax.checkpoint as ocp\nimport optax\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = "data/coinrun_episodes"\n checkpoint: str = ""\n checkpoint_step: Optional[int] = None\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_co_train: bool = True\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=args.lam_co_train,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n use_maskgit=False,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = 
restored["model_state"]\nparams = restored_train_state.params\n\n\ndef _sampling_wrapper(module, batch):\n # return module.sample_maskgit(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return module.sample_causal(batch, args.seq_len, args.temperature, args.sample_argmax)\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n # sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie)) \n sampling_fn = nn.apply(_sampling_wrapper, genie)\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(\n params,\n batch\n )\n return generated_vid\n\ndef _get_dataloader_iterator():\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith(".array_record")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n return grain_iterator\n\n# --- Get video + latent actions ---\n# grain_iterator = _get_dataloader_iterator()\n# video_batch = next(grain_iterator)\n# video_batch = np.load("overfit_dir/single_sample_corner.npy")\nvideo_batch = np.load("overfit_dir/oai_sample_seed69_1.npy") # *255.\n\n\nvideo_batch = video_batch.astype(args.dtype) / 255.0\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch[:,:args.seq_len])\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nprint("autoreg sampling...")\nvid = _autoreg_sample(rng, video_batch, action_batch)\nprint("autoreg sampling done. calculating ssim and saving video")\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f"SSIM: {ssim}")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, :args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, "n b t h w c -> t (b h) (n w) c")\n\n# --- Save video --- \nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f"{action}", fill=255)\nimgs[0].save(\n f"generation_{time.time()}.gif",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n
|
python
|
tab
|
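sample.py above stacks ground truth over predictions and writes the result as a GIF. A self-contained sketch of that frame-assembly step with dummy arrays (shapes chosen purely for illustration):

import numpy as np
import einops
from PIL import Image

# Dummy (B, T, H, W, C) uint8 videos standing in for ground truth and model output.
B, T, H, W, C = 1, 4, 16, 16, 3
true_videos = np.zeros((B, T, H, W, C), dtype=np.uint8)
pred_videos = np.full((B, T, H, W, C), 255, dtype=np.uint8)

comparison = np.stack([true_videos, pred_videos])   # (2, B, T, H, W, C)
frames = einops.rearrange(comparison, "n b t h w c -> t (b h) (n w) c")

imgs = [Image.fromarray(f) for f in frames]         # one image per timestep
imgs[0].save("demo.gif", save_all=True, append_images=imgs[1:], duration=250, loop=0)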
4
| 9,298
|
train_dynamics.py
| 0
| 0
| null |
python
|
tab
|
5
| 9,939
|
genie.py
| 0
| 0
|
from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsAutoregressive\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nn.Module):\n """Genie model"""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n use_maskgit: bool\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n if self.use_maskgit:\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n ) \n else:\n self.dynamics = DynamicsAutoregressive(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch["videos"], training=False)\n lam_outputs = self.lam.vq_encode(batch["videos"], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs["z_q"],\n lambda: jax.lax.stop_gradient(lam_outputs["z_q"])\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs["indices"]),\n latent_actions=latent_actions,\n )\n outputs["mask_rng"] = batch["mask_rng"]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs["token_logits"], axis=-1)\n outputs["recon"] = self.tokenizer.decode(\n mle_indices, batch["videos"].shape[2:4]\n )\n outputs["lam_indices"] = lam_outputs["indices"]\n return outputs\n\n\n def sample_causal(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ):\n """\n Autoregressively samples up to `seq_len` future frames using the causal transformer 
backend.\n\n - Input frames are tokenized once.\n - Future frames are generated one at a time, each conditioned on all previous frames.\n - All frames are detokenized in a single pass at the end.\n\n Args:\n batch: Dict with at least "videos" (B, T, H, W, C)\n seq_len: total number of frames to generate (including context)\n temperature: sampling temperature\n sample_argmax: if True, use argmax instead of sampling\n\n Returns:\n Generated video frames (B, seq_len, H, W, C)\n """\n # --- Encode context frames ---\n tokenizer_out = self.tokenizer.vq_encode(batch["videos"], training=False)\n token_idxs = tokenizer_out["indices"] # (B, T, N)\n B, T, N = token_idxs.shape\n\n # jax.debug.print("token_idxs shape: {}", token_idxs.shape)\n # --- Prepare initial token sequence ---\n # Pad with zeros for future frames\n pad_shape = (B, seq_len - T, N)\n token_idxs_full = jnp.concatenate(\n [token_idxs, jnp.zeros(pad_shape, dtype=token_idxs.dtype)], axis=1\n ) # (B, seq_len, N)\n\n # --- Prepare latent actions ---\n action_tokens = self.lam.vq.get_codes(batch["latent_actions"]) # (B, S-1, )\n # --- Autoregressive generation loop ---\n rng = batch["rng"]\n for t in range(T, seq_len):\n for n in range(N):\n jax.debug.print("Sampling token {} from frame {}", n, t)\n dyna_inputs = {\n "video_tokens": token_idxs_full,\n "latent_actions": action_tokens\n }\n # jax.debug.print("token_idxs_full 0: {}", token_idxs_full[0,:,0])\n dyna_outputs = self.dynamics(dyna_inputs, training=False)\n # # dyna_outputs["token_logits"]: (B, t, N, vocab_size)\n # # We want the logits for the last time step (frame t-1 predicting t)\n # jax.debug.breakpoint()\n next_token_logits = dyna_outputs["token_logits"][:, t, n, :].astype(jnp.float32) # (B, 1, vocab_size)\n\n # Sample or argmax for each patch\n if sample_argmax:\n next_token = jnp.argmax(next_token_logits, axis=-1) # (B, 1)\n else:\n rng, step_rng = jax.random.split(rng)\n next_token = jax.random.categorical(\n step_rng, next_token_logits / temperature, axis=-1\n ) # (B, 1)\n\n # Insert the generated tokens into the sequence\n token_idxs_full = token_idxs_full.at[:, t, n].set(next_token)\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n token_idxs_full, video_hw=batch["videos"].shape[2:4]\n )\n return final_frames\n\n\n @nn.compact\n def sample_maskgit(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by \n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size \n T: number of input (conditioning) frames \n N: patches per frame \n S: sequence length \n A: action space \n D: model latent dimension\n """\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch["videos"], training=False)\n token_idxs = tokenizer_out["indices"] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = 
jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch["latent_actions"]) \n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast="params",\n split_rngs={"params": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n \n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch["rng"], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn,\n initial_carry,\n timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch["videos"].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch["videos"], training=training)\n return lam_output["indices"]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1) \n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = 
mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\ndef restore_genie_components(\n train_state: TrainState,\n sharding: jax.sharding.NamedSharding,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """Restore pre-trained Genie components"""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n \n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n ),\n )["model_state"]\n restored_tokenizer_params = restored_tokenizer.params["params"]\n train_state.params["params"]["tokenizer"].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n ),\n )["model_state"]\n restored_lam_params = restored_lam.params["params"]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately discarded.\n # A workaround 
would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params["params"]["lam"]\n }\n train_state.params["params"]["lam"].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding."""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, "shape") and hasattr(leaf_template, "dtype"):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)
|
python
|
tab
|
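sample_causal in the genie.py snapshot above fills the padded token grid one patch at a time, re-running the dynamics model after each write. A stripped-down sketch of that inner loop, with a random stand-in for the transformer:

import jax
import jax.numpy as jnp

B, S, N, vocab = 1, 3, 4, 16   # batch, total frames, patches per frame, codebook size
T = 1                          # context frames already tokenized
temperature = 1.0
rng = jax.random.PRNGKey(0)
tokens = jnp.zeros((B, S, N), dtype=jnp.int32)  # context tokens plus zero padding

def fake_dynamics(tokens):
    # Stand-in for the causal ST-transformer: logits of shape (B, S, N, vocab).
    return jax.random.normal(jax.random.PRNGKey(42), (B, S, N, vocab))

for t in range(T, S):
    for n in range(N):
        logits = fake_dynamics(tokens)[:, t, n, :]            # logits for patch (t, n)
        rng, step_rng = jax.random.split(rng)
        nxt = jax.random.categorical(step_rng, logits / temperature, axis=-1)
        tokens = tokens.at[:, t, n].set(nxt)                  # write token, then re-predict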
6
| 10,885
|
train_dynamics.py
| 0
| 0
| null |
python
|
tab
|
7
| 21,382
|
models/dynamics.py
| 0
| 0
|
from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport einops\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """MaskGIT dynamics model"""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n spatial_bert=True,\n use_flash_attention=self.use_flash_attention,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n "mask_token",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch["video_tokens"])\n if training:\n rng1, rng2 = jax.random.split(batch["mask_rng"])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch["latent_actions"])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n\n\nclass DynamicsAutoregressive(nn.Module):\n """Autoregressive (causal) dynamics model"""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n self.use_flash_attention,\n spatial_bert=False,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n vid_embed = self.patch_embed(batch["video_tokens"])\n act_embed = self.action_up(batch["latent_actions"])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n # vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (1, 0), (1, 0), (0, 0)))\n # logits = self.dynamics(vid_embed_padded)[:, :-1, :-1]\n vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (0, 0), (1, 0), (0, 0)))\n\n # FIXME mihir: HACK\n # rng1, _rng = jax.random.split(batch["mask_rng"])\n # noise = jax.random.normal(_rng, vid_embed_padded.shape)\n # logits = self.dynamics(noise)[:, :, :-1]\n\n logits = self.dynamics(vid_embed_padded)[:, :, :-1]\n\n mask = jnp.ones(vid_embed.shape[:-1])\n return dict(token_logits=logits, mask=mask)
|
python
|
tab
|
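DynamicsAutoregressive above shifts targets by padding one zero slot onto the patch axis and dropping the last output position, so position n's logits predict token n under teacher forcing. A shape-only sketch of that alignment (dummy tensors in place of real embeddings):

import jax.numpy as jnp

B, T, N, D, vocab = 1, 2, 3, 4, 8
vid_embed = jnp.ones((B, T, N, D))

# Prepend a zero "start" slot along the patch axis...
vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (0, 0), (1, 0), (0, 0)))
assert vid_embed_padded.shape == (B, T, N + 1, D)

# ...then drop the final output position so logits[:, :, n] lines up with token n.
fake_logits = jnp.zeros((B, T, N + 1, vocab))  # stand-in transformer output
logits = fake_logits[:, :, :-1]
assert logits.shape == (B, T, N, vocab)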
8
| 23,082
|
models/dynamics.py
| 3,006
| 0
| null |
python
|
selection_mouse
|
9
| 23,644
|
models/dynamics.py
| 3,225
| 0
| null |
python
|
selection_mouse
|
10
| 24,217
|
models/dynamics.py
| 3,314
| 0
| null |
python
|
selection_mouse
|
11
| 25,310
|
models/dynamics.py
| 3,580
| 0
| null |
python
|
selection_mouse
|
12
| 26,045
|
models/dynamics.py
| 3,626
| 0
| null |
python
|
selection_mouse
|
13
| 26,052
|
models/dynamics.py
| 3,625
| 0
| null |
python
|
selection_command
|
14
| 26,546
|
models/dynamics.py
| 3,519
| 0
| null |
python
|
selection_mouse
|
15
| 27,045
|
models/dynamics.py
| 3,579
| 0
| null |
python
|
selection_mouse
|
16
| 27,046
|
models/dynamics.py
| 3,578
| 0
| null |
python
|
selection_command
|
17
| 32,793
|
models/dynamics.py
| 3,579
| 0
|
\n
|
python
|
content
|
18
| 33,091
|
models/dynamics.py
| 3,588
| 0
|
j
|
python
|
content
|
19
| 33,092
|
models/dynamics.py
| 3,589
| 0
| null |
python
|
selection_keyboard
|
20
| 33,177
|
models/dynamics.py
| 3,589
| 0
|
a
|
python
|
content
|
21
| 33,180
|
models/dynamics.py
| 3,590
| 0
| null |
python
|
selection_keyboard
|
22
| 33,428
|
models/dynamics.py
| 3,590
| 0
|
x
|
python
|
content
|
23
| 33,429
|
models/dynamics.py
| 3,591
| 0
| null |
python
|
selection_keyboard
|
24
| 33,554
|
models/dynamics.py
| 3,591
| 0
|
.
|
python
|
content
|
25
| 33,555
|
models/dynamics.py
| 3,592
| 0
| null |
python
|
selection_keyboard
|
26
| 33,937
|
models/dynamics.py
| 3,592
| 0
|
d
|
python
|
content
|
27
| 33,941
|
models/dynamics.py
| 3,593
| 0
| null |
python
|
selection_keyboard
|
28
| 34,257
|
models/dynamics.py
| 3,593
| 0
|
e
|
python
|
content
|
29
| 34,258
|
models/dynamics.py
| 3,594
| 0
| null |
python
|
selection_keyboard
|
30
| 34,377
|
models/dynamics.py
| 3,594
| 0
|
b
|
python
|
content
|
31
| 34,378
|
models/dynamics.py
| 3,595
| 0
| null |
python
|
selection_keyboard
|
32
| 34,531
|
models/dynamics.py
| 3,595
| 0
|
u
|
python
|
content
|
33
| 34,531
|
models/dynamics.py
| 3,596
| 0
| null |
python
|
selection_keyboard
|
34
| 34,618
|
models/dynamics.py
| 3,596
| 0
|
g
|
python
|
content
|
35
| 34,619
|
models/dynamics.py
| 3,597
| 0
| null |
python
|
selection_keyboard
|
36
| 34,826
|
models/dynamics.py
| 3,597
| 0
|
-
|
python
|
content
|
37
| 34,827
|
models/dynamics.py
| 3,598
| 0
| null |
python
|
selection_keyboard
|
38
| 35,128
|
models/dynamics.py
| 3,598
| 0
|
b
|
python
|
content
|
39
| 35,129
|
models/dynamics.py
| 3,599
| 0
| null |
python
|
selection_keyboard
|
40
| 35,487
|
models/dynamics.py
| 3,598
| 1
| null |
python
|
content
|
41
| 35,621
|
models/dynamics.py
| 3,597
| 1
| null |
python
|
content
|
42
| 35,866
|
models/dynamics.py
| 3,597
| 0
|
.
|
python
|
content
|
43
| 35,866
|
models/dynamics.py
| 3,598
| 0
| null |
python
|
selection_keyboard
|
44
| 36,844
|
models/dynamics.py
| 3,598
| 0
|
breakpoint
|
python
|
content
|
45
| 37,646
|
models/dynamics.py
| 3,608
| 0
|
()
|
python
|
content
|
46
| 37,648
|
models/dynamics.py
| 3,609
| 0
| null |
python
|
selection_keyboard
|
47
| 37,748
|
models/dynamics.py
| 3,609
| 1
|
)
|
python
|
content
|
48
| 37,749
|
models/dynamics.py
| 3,610
| 0
| null |
python
|
selection_keyboard
|
49
| 37,949
|
models/dynamics.py
| 3,609
| 0
| null |
python
|
selection_command
|
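The keystroke records above insert a jax.debug.breakpoint() call into models/dynamics.py. For reference, jax.debug.breakpoint pauses execution even inside jitted functions; a minimal usage example:

import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    y = x + 1
    # Drops into a pdb-like prompt where traced values (e.g. y) can be inspected,
    # even though f is jit-compiled.
    jax.debug.breakpoint()
    return y * 2

f(jnp.arange(3))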
50
| 889,413
|
models/dynamics.py
| 3,709
| 0
| null |
python
|
selection_mouse
|
51
| 889,415
|
models/dynamics.py
| 3,708
| 0
| null |
python
|
selection_command
|
52
| 890,312
|
models/dynamics.py
| 3,562
| 0
| null |
python
|
selection_mouse
|
53
| 890,884
|
models/dynamics.py
| 3,610
| 0
| null |
python
|
selection_mouse
|
54
| 890,890
|
models/dynamics.py
| 3,609
| 0
| null |
python
|
selection_command
|
55
| 932,510
|
train_dynamics.py
| 0
| 0
| null |
python
|
tab
|
56
| 933,777
|
models/dynamics.py
| 0
| 0
| null |
python
|
tab
|
57
| 944,533
|
utils/nn.py
| 0
| 0
|
import math\nfrom typing import Tuple\nfrom functools import partial\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html"""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n# class STBlock2(nn.Module):\n # dim: int\n # num_heads: int\n # dropout: float\n # param_dtype: jnp.dtype\n # dtype: jnp.dtype\n\n # @nn.remat\n # @nn.compact\n # def __call__(self, x: jax.Array) -> jax.Array:\n # # --- Spatial attention ---\n # z = PositionalEncoding(self.dim)(x)\n # z = nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z)\n # causal_mask = jnp.tri(z.shape[-2])\n # z = nn.MultiHeadAttention(\n # num_heads=self.num_heads,\n # qkv_features=self.dim,\n # dropout_rate=self.dropout,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z, mask=causal_mask)\n # x = x + z\n\n # # --- Temporal attention ---\n # x = x.swapaxes(1, 2)\n # z = PositionalEncoding(self.dim)(x)\n # z = nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z)\n # causal_mask = jnp.tri(z.shape[-2])\n # z = nn.MultiHeadAttention(\n # num_heads=self.num_heads,\n # qkv_features=self.dim,\n # dropout_rate=self.dropout,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z, mask=causal_mask)\n # x = x + z\n # x = x.swapaxes(1, 2)\n\n # # --- Feedforward ---\n # z = nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(x)\n # # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. 
Usually, FFN hidden dimension is 4x model_dim\n # z = nn.Dense(\n # self.dim,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z)\n # z = nn.gelu(z)\n # x = x + z\n\n # return x\n\n# class CausalTransformer(nn.Module):\n # model_dim: int\n # out_dim: int\n # num_blocks: int\n # num_heads: int\n # dropout: float\n # param_dtype: jnp.dtype\n # dtype: jnp.dtype\n\n # @nn.compact\n # def __call__(self, x: jax.Array) -> jax.Array:\n # # Input projection and normalization\n # x = nn.Sequential(\n # [\n # nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # ),\n # nn.Dense(self.model_dim,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # ),\n # nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # ),\n # ]\n # )(x)\n # # Causal transformer blocks\n # for _ in range(self.num_blocks):\n # x = STBlock2(\n # dim=self.model_dim,\n # num_heads=self.num_heads,\n # dropout=self.dropout,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(x)\n\n # # Output projection\n # x = nn.Dense(\n # self.out_dim,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(x)\n # return x # (B, T, E)\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n spatial_bert: bool = True\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=not self.spatial_bert),\n # decode=True\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=True),\n # decode=True\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. 
Usually, FFN hidden dimension is 4x model_dim\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n spatial_bert: bool = True\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n spatial_bert=self.spatial_bert,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n "codebook",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng("dropout")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """\n \n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = 'cudnn' if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, '... l h d -> (...) 
l h d')\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False)\n return jnp.logical_and(attention_mask, expanded_mask)\n \n original_shape = query.shape\n original_seq_len = query.shape[-3]\n \n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n \n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n \n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n \n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n \n return attention_fn\n\n
|
python
|
tab
|
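_create_flash_attention_fn in the utils/nn.py snapshot above pads the sequence length to a multiple of 4 (a cuDNN flash-attention requirement noted in its docstring) and masks out the padded slots. A sketch of just that padding-and-masking arithmetic, with an illustrative sequence length:

import jax.numpy as jnp

seq_len = 10
target_len = ((seq_len + 3) // 4) * 4   # round up to the nearest multiple of 4 -> 12
pad_size = target_len - seq_len         # number of padded positions -> 2

# Padded positions must neither attend nor be attended to.
attn_mask = jnp.ones((target_len, target_len), dtype=jnp.bool_)
attn_mask = attn_mask.at[seq_len:, :].set(False)   # padded queries
attn_mask = attn_mask.at[:, seq_len:].set(False)   # padded keys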
58
| 952,437
|
utils/nn.py
| 5,364
| 0
| null |
python
|
selection_mouse
|
59
| 952,610
|
utils/nn.py
| 5,363
| 5
|
shape
|
python
|
selection_mouse
|
60
| 953,304
|
utils/nn.py
| 5,350
| 0
| null |
python
|
selection_mouse
|
61
| 953,855
|
utils/nn.py
| 5,346
| 0
| null |
python
|
selection_mouse
|
62
| 954,017
|
utils/nn.py
| 5,339
| 11
|
causal_mask
|
python
|
selection_mouse
|
63
| 974,123
|
utils/nn.py
| 4,747
| 0
| null |
python
|
selection_mouse
|
64
| 975,405
|
.venv/lib/python3.10/site-packages/flax/linen/attention.py
| 0
| 0
|
# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""Attention core modules for Flax."""\nfrom __future__ import annotations\n\nimport functools\nimport inspect\nimport warnings\nfrom typing import Any, overload\nfrom collections.abc import Callable\n\nimport jax\nimport jax.numpy as jnp\nfrom jax import lax, random\n\nfrom flax.linen import initializers\nfrom flax.linen.dtypes import promote_dtype\nfrom flax.linen.linear import (\n DenseGeneral,\n default_kernel_init,\n)\nfrom flax.linen.module import Module, compact, merge_param\nfrom flax.linen.normalization import LayerNorm\nfrom flax.typing import (\n Array,\n PRNGKey,\n Dtype,\n Shape as Shape,\n Initializer,\n PrecisionLike,\n DotGeneralT,\n)\n\n\ndef dot_product_attention_weights(\n query: Array,\n key: Array,\n bias: Array | None = None,\n mask: Array | None = None,\n broadcast_dropout: bool = True,\n dropout_rng: PRNGKey | None = None,\n dropout_rate: float = 0.0,\n deterministic: bool = False,\n dtype: Dtype | None = None,\n precision: PrecisionLike = None,\n module: Module | None = None,\n force_fp32_for_softmax: bool = False,\n einsum_dot_general: Callable[..., Array] | None = None,\n einsum: Callable[..., Array] | None = None,\n):\n """Computes dot-product attention weights given query and key.\n\n Used by :func:`dot_product_attention`, which is what you'll most likely use.\n But if you want access to the attention weights for introspection, then\n you can directly call this function and call einsum yourself.\n\n Args:\n query: queries for calculating attention with shape of ``[batch...,\n q_length, num_heads, qk_depth_per_head]``.\n key: keys for calculating attention with shape of ``[batch..., kv_length,\n num_heads, qk_depth_per_head]``.\n bias: bias for the attention weights. This should be broadcastable to the\n shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for\n incorporating causal masks, padding masks, proximity bias, etc.\n mask: mask for the attention weights. This should be broadcastable to the\n shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for\n incorporating causal masks. Attention weights are masked out if their\n corresponding mask value is ``False``.\n broadcast_dropout: bool: use a broadcasted dropout along batch dims.\n dropout_rng: JAX PRNGKey: to be used for dropout\n dropout_rate: dropout rate\n deterministic: bool, deterministic or not (to apply dropout)\n dtype: the dtype of the computation (default: infer from inputs and params)\n precision: numerical precision of the computation see ``jax.lax.Precision``\n for details.\n module: the Module that will sow the attention weights into the\n 'intermediates' collection. Remember to mark 'intermediates' as mutable\n via ``mutable=['intermediates']`` in order to have that collection\n returned. If ``module`` is None, the attention weights will not be sowed.\n force_fp32_for_softmax: bool, whether to force the softmax to be computed in\n fp32. 
This is useful for mixed-precision training where higher precision\n is desired for numerical stability.\n einsum_dot_general: the dot_general to use in einsum.\n einsum: If unspecified, default `jnp.einsum` will be used. This argument is\n mutually exclusive with `precision` and `einsum_dot_general`.\n\n Raises:\n ValueError: if both `precision`/`einsum_dot_general` and `einsum` are\n specified.\n\n Returns:\n Output of shape ``[batch..., num_heads, q_length, kv_length]``.\n """\n if (precision or einsum_dot_general) and einsum:\n raise ValueError(\n 'precision/einsum_dot_general and einsum are mutually exclusive. Please'\n ' specify only one of them.'\n )\n if not einsum:\n einsum = functools.partial(\n jnp.einsum,\n precision=precision,\n _dot_general=einsum_dot_general\n if einsum_dot_general\n else jax.lax.dot_general,\n )\n\n query, key = promote_dtype(query, key, dtype=dtype)\n dtype = query.dtype\n\n assert query.ndim == key.ndim, 'q, k must have same rank.'\n assert query.shape[:-3] == key.shape[:-3], 'q, k batch dims must match.'\n assert query.shape[-2] == key.shape[-2], 'q, k num_heads must match.'\n assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'\n\n # calculate attention matrix\n depth = query.shape[-1]\n query = query / jnp.sqrt(depth).astype(dtype)\n # attn weight shape is (batch..., num_heads, q_length, kv_length)\n attn_weights = einsum('...qhd,...khd->...hqk', query, key)\n\n # apply attention bias: masking, dropout, proximity bias, etc.\n if bias is not None:\n attn_weights = attn_weights + bias\n # apply attention mask\n if mask is not None:\n big_neg = jnp.finfo(dtype).min\n attn_weights = jnp.where(mask, attn_weights, big_neg)\n\n # normalize the attention weights\n if force_fp32_for_softmax and dtype != jnp.float32:\n attn_weights = jax.nn.softmax(attn_weights.astype(jnp.float32))\n else:\n attn_weights = jax.nn.softmax(attn_weights).astype(dtype)\n\n if module:\n module.sow('intermediates', 'attention_weights', attn_weights)\n\n # apply attention dropout\n if not deterministic and dropout_rate > 0.0:\n keep_prob = 1.0 - dropout_rate\n if broadcast_dropout:\n # dropout is broadcast across the batch + head dimensions\n dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]\n keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape) # type: ignore\n else:\n keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape) # type: ignore\n multiplier = keep.astype(dtype) / jnp.asarray(keep_prob, dtype=dtype)\n attn_weights = attn_weights * multiplier\n\n return attn_weights\n\n\ndef dot_product_attention(\n query: Array,\n key: Array,\n value: Array,\n bias: Array | None = None,\n mask: Array | None = None,\n broadcast_dropout: bool = True,\n dropout_rng: PRNGKey | None = None,\n dropout_rate: float = 0.0,\n deterministic: bool = False,\n dtype: Dtype | None = None,\n precision: PrecisionLike = None,\n module: Module | None = None,\n force_fp32_for_softmax: bool = False,\n einsum_dot_general: Callable[..., Array] | None = None,\n qk_attn_weights_einsum: Callable[..., Array] | None = None,\n attn_weights_value_einsum: Callable[..., Array] | None = None,\n):\n """Computes dot-product attention given query, key, and value.\n\n This is the core function for applying attention based on\n https://arxiv.org/abs/1706.03762. It calculates the attention weights given\n query and key and combines the values using the attention weights.\n\n .. 
note::\n ``query``, ``key``, ``value`` needn't have any batch dimensions.\n\n Args:\n query: queries for calculating attention with shape of ``[batch...,\n q_length, num_heads, qk_depth_per_head]``.\n key: keys for calculating attention with shape of ``[batch..., kv_length,\n num_heads, qk_depth_per_head]``.\n value: values to be used in attention with shape of ``[batch..., kv_length,\n num_heads, v_depth_per_head]``.\n bias: bias for the attention weights. This should be broadcastable to the\n shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for\n incorporating causal masks, padding masks, proximity bias, etc.\n mask: mask for the attention weights. This should be broadcastable to the\n shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for\n incorporating causal masks. Attention weights are masked out if their\n corresponding mask value is ``False``.\n broadcast_dropout: bool: use a broadcasted dropout along batch dims.\n dropout_rng: JAX PRNGKey: to be used for dropout\n dropout_rate: dropout rate\n deterministic: bool, deterministic or not (to apply dropout)\n dtype: the dtype of the computation (default: infer from inputs)\n precision: numerical precision of the computation see ``jax.lax.Precision`\n for details.\n module: the Module that will sow the attention weights into the\n 'intermediates' collection. Remember to mark 'intermediates' as mutable\n via ``mutable=['intermediates']`` in order to have that collection\n returned. If ``module`` is None, the attention weights will not be sowed.\n force_fp32_for_softmax: bool, whether to force the softmax to be computed in\n fp32. This is useful for mixed-precision training where higher precision\n is desired for numerical stability.\n einsum_dot_general: the dot_general to use in `jnp.einsum`.\n qk_attn_weights_einsum: the einsum for computing the attention weights. When\n unspecified, the default `jnp.einsum` will be used. This argument is\n mutually exclusive with `precision` and `einsum_dot_general`.\n attn_weights_value_einsum: the einsum for computing the product of the\n attention weights and the values. When unspecified, the default\n `jnp.einsum` will be used. This argument is mutually exclusive with\n `precision` and `einsum_dot_general`.\n\n Returns:\n Output of shape ``[batch..., q_length, num_heads, v_depth_per_head]``.\n\n Raises:\n ValueError: if both `precision`/`einsum_dot_general` and\n `qk_attn_weights_einsum`/`attn_weights_value_einsum` are\n specified.\n """\n if (qk_attn_weights_einsum and not attn_weights_value_einsum) or (\n not qk_attn_weights_einsum and attn_weights_value_einsum\n ):\n raise ValueError(\n 'qk_attn_weights_einsum and attn_weights_value_einsum must be specified'\n ' together.'\n )\n if (precision or einsum_dot_general) and (\n qk_attn_weights_einsum or attn_weights_value_einsum\n ):\n raise ValueError(\n 'precision/einsum_dot_general and'\n ' qk_attn_weights_einsum/attn_weights_value_einsum are mutually'\n ' exclusive. 
Please specify only one of them.'\n )\n\n query, key, value = promote_dtype(query, key, value, dtype=dtype)\n dtype = query.dtype\n assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'\n assert (\n query.shape[:-3] == key.shape[:-3] == value.shape[:-3]\n ), 'q, k, v batch dims must match.'\n assert (\n query.shape[-2] == key.shape[-2] == value.shape[-2]\n ), 'q, k, v num_heads must match.'\n assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'\n\n # compute attention weights\n attn_weights = dot_product_attention_weights(\n query,\n key,\n bias,\n mask,\n broadcast_dropout,\n dropout_rng,\n dropout_rate,\n deterministic,\n dtype,\n precision,\n module,\n force_fp32_for_softmax,\n einsum_dot_general=einsum_dot_general,\n einsum=qk_attn_weights_einsum,\n )\n if not attn_weights_value_einsum:\n attn_weights_value_einsum = functools.partial(\n jnp.einsum,\n precision=precision,\n _dot_general=einsum_dot_general\n if einsum_dot_general\n else jax.lax.dot_general,\n )\n # return weighted sum over values for each query position\n return attn_weights_value_einsum(\n '...hqk,...khd->...qhd',\n attn_weights,\n value,\n )\n\n\nclass MultiHeadDotProductAttention(Module):\n """Multi-head dot-product attention.\n\n Example usage::\n\n >>> import flax.linen as nn\n >>> import jax\n\n >>> layer = nn.MultiHeadDotProductAttention(num_heads=8, qkv_features=16)\n >>> key1, key2, key3, key4, key5, key6 = jax.random.split(jax.random.key(0), 6)\n >>> shape = (4, 3, 2, 5)\n >>> q, k, v = jax.random.uniform(key1, shape), jax.random.uniform(key2, shape), jax.random.uniform(key3, shape)\n >>> variables = layer.init(jax.random.key(0), q)\n\n >>> # different inputs for inputs_q, inputs_k and inputs_v\n >>> out = layer.apply(variables, q, k, v)\n >>> # equivalent to layer.apply(variables, inputs_q=q, inputs_k=k, inputs_v=k)\n >>> out = layer.apply(variables, q, k)\n >>> # equivalent to layer.apply(variables, inputs_q=q, inputs_k=q) and layer.apply(variables, inputs_q=q, inputs_k=q, inputs_v=q)\n >>> out = layer.apply(variables, q)\n\n >>> attention_kwargs = dict(\n ... num_heads=8,\n ... qkv_features=16,\n ... kernel_init=nn.initializers.ones,\n ... bias_init=nn.initializers.zeros,\n ... dropout_rate=0.5,\n ... deterministic=False,\n ... )\n >>> class Module(nn.Module):\n ... attention_kwargs: dict\n ...\n ... @nn.compact\n ... def __call__(self, x, dropout_rng=None):\n ... out1 = nn.MultiHeadDotProductAttention(**self.attention_kwargs)(x, dropout_rng=dropout_rng)\n ... out2 = nn.MultiHeadDotProductAttention(**self.attention_kwargs)(x, dropout_rng=dropout_rng)\n ... return out1, out2\n >>> module = Module(attention_kwargs)\n >>> variables = module.init({'params': key1, 'dropout': key2}, q)\n\n >>> # out1 and out2 are different.\n >>> out1, out2 = module.apply(variables, q, rngs={'dropout': key3})\n >>> # out3 and out4 are different.\n >>> # out1 and out3 are different. out2 and out4 are different.\n >>> out3, out4 = module.apply(variables, q, rngs={'dropout': key4})\n >>> # out1 and out2 are the same.\n >>> out1, out2 = module.apply(variables, q, dropout_rng=key5)\n >>> # out1 and out2 are the same as out3 and out4.\n >>> # providing a `dropout_rng` arg will take precedence over the `rngs` arg in `.apply`\n >>> out3, out4 = module.apply(variables, q, rngs={'dropout': key6}, dropout_rng=key5)\n\n Attributes:\n num_heads: Number of attention heads. Features (i.e. 
inputs_q.shape[-1])\n should be divisible by the number of heads.\n dtype: The dtype of the computation (default: infer from inputs and params)\n param_dtype: The dtype passed to parameter initializers (default: float32)\n qkv_features: Dimension of the key, query, and value.\n out_features: Dimension of the last projection\n broadcast_dropout: Use a broadcasted dropout along batch dims.\n dropout_rate: Dropout rate.\n deterministic: If False, the attention weight is masked randomly using\n dropout, whereas if True, the attention weights are deterministic.\n precision: Numerical precision of the computation see ``jax.lax.Precision``\n for details.\n kernel_init: Initializer for the kernel of the Dense layers.\n out_kernel_init: Optional Initializer for the kernel of the output Dense layer,\n if None, ``kernel_init`` will be used.\n bias_init: Initializer for the bias of the Dense layers.\n out_bias_init: Optional Initializer for the bias of the output Dense layer,\n if None, ``bias_init`` will be used.\n use_bias: Whether pointwise QKVO dense transforms use bias.\n attention_fn: dot_product_attention or compatible function. Accepts query,\n key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n num_heads, value_channels]``\n decode: Whether to prepare and use an autoregressive cache.\n normalize_qk: Should QK normalization be applied (arxiv.org/abs/2302.05442).\n qk_attn_weights_einsum_cls: factory function to create the einsum for\n computing the attention weights.\n attn_weights_value_einsum_cls: factory function to create the einsum for\n computing the product of the attention weights and the values.\n """\n\n num_heads: int\n dtype: Dtype | None = None\n param_dtype: Dtype = jnp.float32\n qkv_features: int | None = None\n out_features: int | None = None\n broadcast_dropout: bool = True\n dropout_rate: float = 0.0\n deterministic: bool | None = None\n precision: PrecisionLike = None\n kernel_init: Initializer = default_kernel_init\n out_kernel_init: Initializer | None = None\n bias_init: Initializer = initializers.zeros_init()\n out_bias_init: Initializer | None = None\n use_bias: bool = True\n attention_fn: Callable[..., Array] = dot_product_attention\n decode: bool = False\n normalize_qk: bool = False\n force_fp32_for_softmax: bool = False\n # Deprecated, will be removed.\n qkv_dot_general: DotGeneralT | None = None\n out_dot_general: DotGeneralT | None = None\n qkv_dot_general_cls: Any = None\n out_dot_general_cls: Any = None\n qk_attn_weights_einsum_cls: Callable[..., Callable[..., Array]] | None = None\n attn_weights_value_einsum_cls: Callable[..., Callable[..., Array]] | None = (\n None\n )\n\n @overload\n def __call__(\n self,\n inputs_q: Array,\n inputs_k: Array | None = None,\n inputs_v: Array | None = None,\n *,\n mask: Array | None = None,\n deterministic: bool | None = None,\n dropout_rng: PRNGKey | None = None,\n sow_weights: bool = False,\n ):\n ...\n\n @overload\n def __call__(\n self,\n inputs_q: Array,\n *,\n inputs_kv: Array | None = None,\n mask: Array | None = None,\n deterministic: bool | None = None,\n dropout_rng: PRNGKey | None = None,\n sow_weights: bool = False,\n ):\n ...\n\n @compact\n def __call__(\n self,\n inputs_q: Array,\n inputs_k: Array | None = None,\n inputs_v: Array | None = None,\n *,\n inputs_kv: Array | None = None,\n mask: Array | None = None,\n deterministic: bool | None = None,\n dropout_rng: PRNGKey | None = None,\n sow_weights: bool = False,\n ):\n """Applies multi-head dot product attention on the input data.\n\n Projects 
the inputs into multi-headed query, key, and value vectors,\n applies dot-product attention and project the results to an output vector.\n\n If both inputs_k and inputs_v are None, they will both copy the value of\n inputs_q (self attention).\n If only inputs_v is None, it will copy the value of inputs_k.\n\n Args:\n inputs_q: input queries of shape ``[batch_sizes..., length, features]``.\n inputs_k: key of shape ``[batch_sizes..., length, features]``. If None,\n inputs_k will copy the value of inputs_q.\n inputs_v: values of shape ``[batch_sizes..., length, features]``. If None,\n inputs_v will copy the value of inputs_k.\n inputs_kv: key/values of shape ``[batch_sizes..., length, features]``. If\n None, inputs_kv will copy the value of inputs_q. This arg will be\n deprecated soon. Use inputs_k and inputs_v instead.\n mask: attention mask of shape ``[batch_sizes..., num_heads, query_length,\n key/value_length]``. Attention weights are masked out if their\n corresponding mask value is ``False``.\n deterministic: if false, the attention weight is masked randomly using\n dropout, whereas if true, the attention weights are deterministic.\n dropout_rng: optional rng key to pass to the attention layer's dropout\n mask. Otherwise, self.make_rng('dropout') is used instead.\n sow_weights: if ``True``, the attention weights are sowed into the\n 'intermediates' collection. Remember to mark 'intermediates' as\n mutable via ``mutable=['intermediates']`` in order to have that\n collection returned.\n\n Returns:\n output of shape ``[batch_sizes..., length, features]``.\n """\n if inputs_kv is not None:\n if inputs_k is not None or inputs_v is not None:\n raise ValueError(\n 'If either `inputs_k` or `inputs_v` is not None, '\n '`inputs_kv` must be None. If `inputs_kv` is not None, both `inputs_k` '\n 'and `inputs_v` must be None. We recommend using `inputs_k` and '\n '`inputs_v` args, since `inputs_kv` will be deprecated soon. See '\n 'https://github.com/google/flax/discussions/3389 for more '\n 'information.'\n )\n inputs_k = inputs_v = inputs_kv\n warnings.warn(\n 'The inputs_kv arg will be deprecated soon. '\n 'Use inputs_k and inputs_v instead. See '\n 'https://github.com/google/flax/discussions/3389 '\n 'for more information.',\n DeprecationWarning,\n )\n else:\n if inputs_k is None:\n if inputs_v is not None:\n raise ValueError(\n '`inputs_k` cannot be None if `inputs_v` is not None. '\n 'To have both `inputs_k` and `inputs_v` be the same value, pass in the '\n 'value to `inputs_k` and leave `inputs_v` as None.'\n )\n inputs_k = inputs_q\n if inputs_v is None:\n inputs_v = inputs_k\n elif inputs_v.shape[-1] == inputs_v.shape[-2]:\n warnings.warn(\n f'You are passing an array of shape {inputs_v.shape} '\n 'to the `inputs_v` arg, when you may have intended '\n 'to pass it to the `mask` arg. As of Flax version '\n '0.7.4, the function signature of '\n "MultiHeadDotProductAttention's `__call__` method "\n 'has changed to `__call__(inputs_q, inputs_k=None, '\n 'inputs_v=None, *, inputs_kv=None, mask=None, '\n 'deterministic=None)`. Use the kwarg `mask` instead. 
'\n 'See https://github.com/google/flax/discussions/3389 '\n 'and read the docstring for more information.',\n DeprecationWarning,\n )\n\n features = self.out_features or inputs_q.shape[-1]\n qkv_features = self.qkv_features or inputs_q.shape[-1]\n assert qkv_features % self.num_heads == 0, (\n f'Memory dimension ({qkv_features}) must be divisible by number of'\n f' heads ({self.num_heads}).'\n )\n head_dim = qkv_features // self.num_heads\n\n dense = functools.partial(\n DenseGeneral,\n axis=-1,\n dtype=self.dtype,\n param_dtype=self.param_dtype,\n features=(self.num_heads, head_dim),\n kernel_init=self.kernel_init,\n bias_init=self.bias_init,\n use_bias=self.use_bias,\n precision=self.precision,\n dot_general=self.qkv_dot_general,\n dot_general_cls=self.qkv_dot_general_cls,\n )\n # project inputs_q to multi-headed q/k/v\n # dimensions are then [batch..., length, n_heads, n_features_per_head]\n query, key, value = (\n dense(name='query')(inputs_q),\n dense(name='key')(inputs_k),\n dense(name='value')(inputs_v),\n )\n\n if self.normalize_qk:\n # Normalizing query and key projections stabilizes training with higher\n # LR. See ViT-22B paper http://arxiv.org/abs/2302.05442 for analysis.\n query = LayerNorm(\n name='query_ln',\n use_bias=False,\n dtype=self.dtype,\n param_dtype=self.param_dtype,\n )(query) # type: ignore[call-arg]\n key = LayerNorm(\n name='key_ln',\n use_bias=False,\n dtype=self.dtype,\n param_dtype=self.param_dtype,\n )(key) # type: ignore[call-arg]\n\n # During fast autoregressive decoding, we feed one position at a time,\n # and cache the keys and values step by step.\n if self.decode:\n # detect if we're initializing by absence of existing cache data.\n is_initialized = self.has_variable('cache', 'cached_key')\n cached_key = self.variable(\n 'cache', 'cached_key', jnp.zeros, key.shape, key.dtype\n )\n cached_value = self.variable(\n 'cache', 'cached_value', jnp.zeros, value.shape, value.dtype\n )\n cache_index = self.variable(\n 'cache', 'cache_index', lambda: jnp.array(0, dtype=jnp.int32)\n )\n if is_initialized:\n (\n *batch_dims,\n max_length,\n num_heads,\n depth_per_head,\n ) = cached_key.value.shape\n # shape check of cached keys against query input\n expected_shape = tuple(batch_dims) + (1, num_heads, depth_per_head)\n if expected_shape != query.shape:\n raise ValueError(\n 'Autoregressive cache shape error, '\n 'expected query shape %s instead got %s.'\n % (expected_shape, query.shape)\n )\n # update key, value caches with our new 1d spatial slices\n cur_index = cache_index.value\n zero = jnp.array(0, dtype=lax.dtype(cur_index.dtype))\n indices: tuple[int | jax.Array, ...] 
= (zero,) * len(\n batch_dims\n ) + (\n cur_index,\n zero,\n zero,\n )\n key = lax.dynamic_update_slice(cached_key.value, key, indices)\n value = lax.dynamic_update_slice(cached_value.value, value, indices)\n cached_key.value = key\n cached_value.value = value\n cache_index.value = cache_index.value + 1\n # causal mask for cached decoder self-attention:\n # our single query position should only attend to those key\n # positions that have already been generated and cached,\n # not the remaining zero elements.\n mask = combine_masks(\n mask,\n jnp.broadcast_to(\n jnp.arange(max_length) <= cur_index,\n tuple(batch_dims) + (1, 1, max_length),\n ),\n )\n\n if (\n self.dropout_rate > 0.0\n ): # Require `deterministic` only if using dropout.\n m_deterministic = merge_param(\n 'deterministic', self.deterministic, deterministic\n )\n if not m_deterministic and dropout_rng is None:\n dropout_rng = self.make_rng('dropout')\n else:\n m_deterministic = True\n\n # `qk_attn_weights_einsum` and `attn_weights_value_einsum` are optional\n # arguments that can be used to override the default `jnp.einsum`. They\n # exist for quantized einsum support in AQT.\n qk_attn_weights_einsum = (\n self.qk_attn_weights_einsum_cls()\n if self.qk_attn_weights_einsum_cls\n else None\n )\n attn_weights_value_einsum = (\n self.attn_weights_value_einsum_cls()\n if self.attn_weights_value_einsum_cls\n else None\n )\n # apply attention\n attn_args = (query, key, value)\n # This kwargs list match the default nn.dot_product_attention.\n # For custom `attention_fn`s, invalid kwargs will be filtered.\n attn_kwargs = dict(\n mask=mask,\n dropout_rng=dropout_rng,\n dropout_rate=self.dropout_rate,\n broadcast_dropout=self.broadcast_dropout,\n deterministic=m_deterministic,\n dtype=self.dtype,\n precision=self.precision,\n force_fp32_for_softmax=self.force_fp32_for_softmax,\n qk_attn_weights_einsum=qk_attn_weights_einsum,\n attn_weights_value_einsum=attn_weights_value_einsum,\n )\n attn_kwargs = {\n k: v\n for k, v in attn_kwargs.items()\n if k in inspect.signature(self.attention_fn).parameters\n }\n if sow_weights:\n x = self.attention_fn(*attn_args, **attn_kwargs, module=self)\n else:\n x = self.attention_fn(*attn_args, **attn_kwargs)\n # back to the original inputs dimensions\n out = DenseGeneral(\n features=features,\n axis=(-2, -1),\n kernel_init=self.out_kernel_init or self.kernel_init,\n bias_init=self.out_bias_init or self.bias_init,\n use_bias=self.use_bias,\n dtype=self.dtype,\n param_dtype=self.param_dtype,\n precision=self.precision,\n dot_general=self.out_dot_general,\n dot_general_cls=self.out_dot_general_cls,\n name='out', # type: ignore[call-arg]\n )(x)\n return out\n\n\nclass MultiHeadAttention(MultiHeadDotProductAttention):\n """Multi-head dot-product attention.\n Alias for ``MultiHeadDotProductAttention``.\n\n **NOTE**: ``MultiHeadAttention`` is a wrapper of ``MultiHeadDotProductAttention``,\n and so their implementations are identical. However ``MultiHeadAttention`` layers\n will, by default, be named ``MultiHeadAttention_{index}``, whereas ``MultiHeadDotProductAttention``\n will be named ``MultiHeadDotProductAttention_{index}``. 
Therefore, this could affect\n checkpointing, param collection names and RNG threading (since the layer name is\n used when generating new RNG's) within the module.\n\n Example usage::\n\n >>> import flax.linen as nn\n >>> import jax\n\n >>> layer = nn.MultiHeadAttention(num_heads=8, qkv_features=16)\n >>> key1, key2, key3, key4, key5, key6 = jax.random.split(jax.random.key(0), 6)\n >>> shape = (4, 3, 2, 5)\n >>> q, k, v = jax.random.uniform(key1, shape), jax.random.uniform(key2, shape), jax.random.uniform(key3, shape)\n >>> variables = layer.init(jax.random.key(0), q)\n\n >>> # different inputs for inputs_q, inputs_k and inputs_v\n >>> out = layer.apply(variables, q, k, v)\n >>> # equivalent to layer.apply(variables, inputs_q=q, inputs_k=k, inputs_v=k)\n >>> out = layer.apply(variables, q, k)\n >>> # equivalent to layer.apply(variables, inputs_q=q, inputs_k=q) and layer.apply(variables, inputs_q=q, inputs_k=q, inputs_v=q)\n >>> out = layer.apply(variables, q)\n\n >>> attention_kwargs = dict(\n ... num_heads=8,\n ... qkv_features=16,\n ... kernel_init=nn.initializers.ones,\n ... bias_init=nn.initializers.zeros,\n ... dropout_rate=0.5,\n ... deterministic=False,\n ... )\n >>> class Module(nn.Module):\n ... attention_kwargs: dict\n ...\n ... @nn.compact\n ... def __call__(self, x, dropout_rng=None):\n ... out1 = nn.MultiHeadAttention(**self.attention_kwargs)(x, dropout_rng=dropout_rng)\n ... out2 = nn.MultiHeadAttention(**self.attention_kwargs)(x, dropout_rng=dropout_rng)\n ... return out1, out2\n >>> module = Module(attention_kwargs)\n >>> variables = module.init({'params': key1, 'dropout': key2}, q)\n\n >>> # out1 and out2 are different.\n >>> out1, out2 = module.apply(variables, q, rngs={'dropout': key3})\n >>> # out3 and out4 are different.\n >>> # out1 and out3 are different. out2 and out4 are different.\n >>> out3, out4 = module.apply(variables, q, rngs={'dropout': key4})\n >>> # out1 and out2 are the same.\n >>> out1, out2 = module.apply(variables, q, dropout_rng=key5)\n >>> # out1 and out2 are the same as out3 and out4.\n >>> # providing a `dropout_rng` arg will take precedence over the `rngs` arg in `.apply`\n >>> out3, out4 = module.apply(variables, q, rngs={'dropout': key6}, dropout_rng=key5)\n\n Attributes:\n num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])\n should be divisible by the number of heads.\n dtype: the dtype of the computation (default: infer from inputs and params)\n param_dtype: the dtype passed to parameter initializers (default: float32)\n qkv_features: dimension of the key, query, and value.\n out_features: dimension of the last projection\n broadcast_dropout: bool: use a broadcasted dropout along batch dims.\n dropout_rate: dropout rate\n deterministic: if false, the attention weight is masked randomly using\n dropout, whereas if true, the attention weights are deterministic.\n precision: numerical precision of the computation see ``jax.lax.Precision``\n for details.\n kernel_init: initializer for the kernel of the Dense layers.\n bias_init: initializer for the bias of the Dense layers.\n use_bias: bool: whether pointwise QKVO dense transforms use bias.\n attention_fn: dot_product_attention or compatible function. 
Accepts query,\n key, value, and returns output of shape ``[bs, dim1, dim2, ..., dimN,,\n num_heads, value_channels]``\n decode: whether to prepare and use an autoregressive cache.\n normalize_qk: should QK normalization be applied (arxiv.org/abs/2302.05442).\n """\n\n\nclass SelfAttention(MultiHeadDotProductAttention):\n """Self-attention special case of multi-head dot-product attention.\n This layer is deprecated in favor of ``MultiHeadDotProductAttention``.\n\n Example usage::\n >>> import flax.linen as nn\n >>> import jax, jax.numpy as jnp\n >>> layer = nn.MultiHeadDotProductAttention(num_heads=8, qkv_features=16)\n >>> variables = layer.init(jax.random.key(0), jnp.ones((4, 3, 2, 5)))\n """\n\n @compact\n def __call__( # type: ignore\n self,\n inputs_q: Array,\n mask: Array | None = None,\n deterministic: bool | None = None,\n dropout_rng: PRNGKey | None = None,\n sow_weights: bool = False,\n ):\n """Applies multi-head dot product self-attention on the input data.\n\n Projects the inputs into multi-headed query, key, and value vectors,\n applies dot-product attention and project the results to an output vector.\n\n Args:\n inputs_q: input queries of shape ``[batch_sizes..., length, features]``.\n mask: attention mask of shape ``[batch_sizes..., num_heads, query_length,\n key/value_length]``. Attention weights are masked out if their\n corresponding mask value is ``False``.\n deterministic: if false, the attention weight is masked randomly using\n dropout, whereas if true, the attention weights are deterministic.\n\n Returns:\n output of shape ``[batch_sizes..., length, features]``.\n """\n warnings.warn(\n 'SelfAttention will be deprecated soon. Use '\n '`MultiHeadDotProductAttention.__call__(inputs_q)` instead. '\n 'See https://github.com/google/flax/discussions/3389 '\n 'for more information.',\n DeprecationWarning,\n )\n return super().__call__(\n inputs_q,\n mask=mask,\n deterministic=deterministic,\n dropout_rng=dropout_rng,\n sow_weights=sow_weights,\n )\n\n\n# mask-making utility functions\n\n\ndef make_attention_mask(\n query_input: Array,\n key_input: Array,\n pairwise_fn: Callable[..., Any] = jnp.multiply,\n extra_batch_dims: int = 0,\n dtype: Dtype = jnp.float32,\n):\n """Mask-making helper for attention weights.\n\n In case of 1d inputs (i.e., ``[batch..., len_q]``, ``[batch..., len_kv]``, the\n attention weights will be ``[batch..., heads, len_q, len_kv]`` and this\n function will produce ``[batch..., 1, len_q, len_kv]``.\n\n Args:\n query_input: a batched, flat input of query_length size\n key_input: a batched, flat input of key_length size\n pairwise_fn: broadcasting elementwise comparison function\n extra_batch_dims: number of extra batch dims to add singleton axes for, none\n by default\n dtype: mask return dtype\n\n Returns:\n A ``[batch..., 1, len_q, len_kv]`` shaped mask for 1d attention.\n """\n mask = pairwise_fn(\n jnp.expand_dims(query_input, axis=-1), jnp.expand_dims(key_input, axis=-2)\n )\n mask = jnp.expand_dims(mask, axis=-3)\n mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))\n return mask.astype(dtype)\n\n\ndef make_causal_mask(\n x: Array, extra_batch_dims: int = 0, dtype: Dtype = jnp.float32\n) -> Array:\n """Make a causal mask for self-attention.\n\n In case of 1d inputs (i.e., ``[batch..., len]``, the self-attention weights\n will be ``[batch..., heads, len, len]`` and this function will produce a\n causal mask of shape ``[batch..., 1, len, len]``.\n\n Args:\n x: input array of shape ``[batch..., len]``\n extra_batch_dims: number of 
batch dims to add singleton axes for, none by\n default\n dtype: mask return dtype\n\n Returns:\n A ``[batch..., 1, len, len]`` shaped causal mask for 1d attention.\n """\n idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)\n return make_attention_mask(\n idxs,\n idxs,\n jnp.greater_equal,\n extra_batch_dims=extra_batch_dims,\n dtype=dtype,\n )\n\n\ndef combine_masks(\n *masks: Array | None, dtype: Dtype = jnp.float32\n) -> Array | None:\n """Combine attention masks.\n\n Args:\n *masks: set of attention mask arguments to combine, some can be None.\n dtype: dtype for the returned mask.\n\n Returns:\n Combined mask, reduced by logical and, returns None if no masks given.\n """\n masks_list = [m for m in masks if m is not None]\n if not masks_list:\n return None\n assert all(\n map(lambda x: x.ndim == masks_list[0].ndim, masks_list)\n ), f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks_list))}'\n mask, *other_masks = masks_list\n for other_mask in other_masks:\n mask = jnp.logical_and(mask, other_mask)\n return mask.astype(dtype)\n
| python | tab |
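Editor's note: the row above records the full text of flax/linen/attention.py as viewed in the session, including the functional dot_product_attention_weights / dot_product_attention API. For reference, a minimal sketch of driving dot_product_attention directly with a causal mask; the shapes and random keys here are illustrative assumptions, not part of the recorded session:

import jax
import jax.numpy as jnp
import flax.linen as nn

batch, length, num_heads, head_dim = 2, 5, 4, 8
k1, k2, k3 = jax.random.split(jax.random.key(0), 3)
# q/k/v follow the documented [batch..., length, num_heads, depth] layout.
q = jax.random.normal(k1, (batch, length, num_heads, head_dim))
k = jax.random.normal(k2, (batch, length, num_heads, head_dim))
v = jax.random.normal(k3, (batch, length, num_heads, head_dim))

# make_causal_mask yields [batch, 1, length, length]; masked-out positions
# are set to jnp.finfo(dtype).min before the softmax, per the source above.
mask = nn.make_causal_mask(jnp.ones((batch, length)))
out = nn.dot_product_attention(q, k, v, mask=mask)
assert out.shape == (batch, length, num_heads, head_dim)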
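The decode=True branch of MultiHeadDotProductAttention above maintains cached_key, cached_value, and cache_index variables for single-position autoregressive decoding. A minimal sketch of that loop under assumed toy sizes, following the contract in the source (initialize with a full-length input to size the cache, then apply one position at a time with mutable=['cache']):

import jax
import jax.numpy as jnp
import flax.linen as nn

layer = nn.MultiHeadDotProductAttention(num_heads=2, qkv_features=8, decode=True)
max_length, features = 4, 8
x = jnp.ones((1, max_length, features))
# init with a full-length input creates the cache at max_length;
# the cache-update branch is skipped because the cache doesn't exist yet.
variables = layer.init(jax.random.key(0), x)
params, cache = variables['params'], variables['cache']

# Feed exactly one position per step; the layer builds its own causal mask
# from cache_index, so no explicit mask is needed here.
for t in range(max_length):
    token = x[:, t:t + 1, :]  # shape [1, 1, features]
    y, mutated = layer.apply(
        {'params': params, 'cache': cache}, token, mutable=['cache']
    )
    cache = mutated['cache']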
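The mask helpers at the end of the file compose naturally: make_attention_mask builds a padding mask from validity indicators, make_causal_mask builds the triangular mask, and combine_masks ANDs them. A small sketch, with the example batch and lengths chosen arbitrarily:

import jax.numpy as jnp
import flax.linen as nn

lengths = jnp.array([3, 5])            # valid tokens per sequence
positions = jnp.arange(5)[None, :]     # [1, length]
valid = positions < lengths[:, None]   # [batch, length] padding indicator

pad_mask = nn.make_attention_mask(valid, valid)  # [batch, 1, len, len]
causal = nn.make_causal_mask(jnp.ones_like(valid, dtype=jnp.float32))
mask = nn.combine_masks(pad_mask, causal)        # elementwise logical AND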
| 65 | 977,694 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 27,397 | 0 | null | python | selection_mouse |
| 66 | 977,822 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 27,396 | 1 |  | python | selection_mouse |
| 67 | 978,001 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 27,396 | 2 | ` | python | selection_mouse |
| 68 | 978,041 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 27,396 | 21 | ``MultiHeadAttention | python | selection_mouse |
| 69 | 978,877 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 27,281 | 0 | null | python | selection_mouse |
| 70 | 983,050 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,462 | 0 | null | python | selection_mouse |
| 71 | 983,643 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,278 | 0 | null | python | selection_mouse |
| 72 | 983,806 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,278 | 9 | num_heads | python | selection_mouse |
| 73 | 984,370 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,314 | 0 | null | python | selection_mouse |
| 74 | 984,534 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,311 | 6 | decode | python | selection_mouse |
| 75 | 985,393 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,378 | 0 | null | python | selection_mouse |
| 76 | 985,560 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,375 | 12 | normalize_qk | python | selection_mouse |
| 77 | 986,876 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,574 | 0 | null | python | selection_mouse |
| 78 | 987,001 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,569 | 29 | attn_weights_value_einsum_cls | python | selection_mouse |
| 79 | 987,356 | .venv/lib/python3.10/site-packages/flax/linen/attention.py | 15,532 | 66 | computing the attention weights.\n attn_weights_value_einsum_cls | python | selection_mouse |