Dataset schema (8 columns):

  Sequence     int64          1 – 25.2k
  Time         int64          1 – 858M
  File         stringclasses  830 distinct values
  RangeOffset  int64          0 – 2.21M
  RangeLength  int64          0 – 168k
  Text         stringlengths  1 – 4.7M characters
  Language     stringclasses  20 distinct values
  Type         stringclasses  9 distinct values

Each record below is rendered as:
  #Sequence  t=Time  file=File  range=RangeOffset+RangeLength  lang=Language  type=Type
followed by the raw Text payload on the next indented line. Terminal escape residue ([?25l, ]633;..., etc.) and literal \n / \r\n markers are preserved exactly as recorded; null fields are shown literally.
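Taken together, the columns describe a linear event log: each row is one IDE or terminal event, ordered by Sequence and timestamped by Time, with RangeOffset/RangeLength locating edits inside File and Text carrying the payload. A minimal sketch of loading and skimming such a log with the Hugging Face `datasets` library, assuming the hypothetical Hub id `pdoom-org/crowd-code` (inferred from the extension output channel named below) and a `train` split:

```python
# Minimal sketch, assuming the log is published as a Hugging Face dataset
# under the hypothetical id "pdoom-org/crowd-code" with a "train" split.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("pdoom-org/crowd-code", split="train")

# Distribution of event types (tab, terminal_command, terminal_output, ...)
print(Counter(ds["Type"]).most_common())

# Events that touch real files, as opposed to the TERMINAL pseudo-file
file_events = ds.filter(lambda row: row["File"] != "TERMINAL")
print(f"{len(file_events)} file-level events")
```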
#2  t=590  file=extension-output-pdoom-org.crowd-code-#1-crowd-code  range=0+0  lang=Log  type=tab
    5:21:45 PM [info] Activating crowd-code\n5:21:45 PM [info] Recording started\n5:21:45 PM [info] Initializing git provider using file system watchers...\n5:21:45 PM [info] Git repository found\n5:21:45 PM [info] Git provider initialized successfully\n5:21:45 PM [info] Initial git state: [object Object]\n
#3  t=43,051  file=TERMINAL  range=0+0  lang=null  type=terminal_command
    undefinedmahajanm@atcremers51:~/Projects/jafar$ source .venv/bin/activate
#4  t=45,102  file=TERMINAL  range=0+0  lang=null  type=terminal_command
    queue
#5  t=45,107  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    ]633;E;watch -n1 squeue --me;9e34dcca-9884-44bd-be0a-bd365b03bcb7]633;C
#6  t=45,238  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?1049h(B[?7h]4;8;rgb:54/54/54\]4;9;rgb:FF/54/54\]4;10;rgb:54/FF/54\]4;11;rgb:FF/FF/54\]4;12;rgb:54/54/FF\]4;13;rgb:FF/54/FF\]4;14;rgb:54/FF/FF\(BEvery 1.0s: squeue --meatcremers51: Tue Jul 22 17:22:30 2025JOBID PARTITION NAME USER STTIME NODES NODELIST(REASON)(B
#7  t=48,020  file=TERMINAL  range=0+0  lang=null  type=terminal_command
    salloc --time=10:00:00 --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --constraint=a40
#8  t=48,100  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l\r]633;A(jafar) ]633;A]0;mahajanm@atcremers51: ~/Projects/jafarmahajanm@atcremers51:~/Projects/jafar$ ]633;B]633;Bsalloc --time=10:00:00 --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --constraint=a40\r\r\n[?2004l\r]633;E;salloc --time=10:00:00 --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5 --constraint=a40;9e34dcca-9884-44bd-be0a-bd365b03bcb7]633;C[?25hsalloc: Pending job allocation 1393544\r\nsalloc: job 1393544 queued and waiting for resources\r\n
#9  t=48,830  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    salloc: job 1393544 has been allocated resources\r\nsalloc: Granted job allocation 1393544\r\n
#10  t=49,010  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    salloc: Waiting for resource configuration\r\n
#11  t=51,804  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \t
#12  t=52,985  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l[?25h
#13  t=53,442  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    salloc: Nodes node17 are ready for job\r\n
#14  t=53,782  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    ]0;mahajanm@node17: /usr/stud/mahajanm/Projects/jafar[?2004h]0;mahajanm@node17: ~/Projects/jafarmahajanm@node17:~/Projects/jafar$
#15  t=54,548  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lso[?25h
#16  t=54,634  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lo[?25h
#17  t=54,773  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lu[?25h
#18  t=54,862  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lr[?25h
#19  t=55,035  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lc[?25h
#20  t=55,140  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25le[?25h
#21  t=55,191  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l [?25h
#22  t=55,318  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l.[?25h
#23  t=55,415  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lv[?25h
#24  t=55,548  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    env/
#25  t=55,723  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lb[?25h
#26  t=55,856  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    in/
#27  t=56,191  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25la[?25h[?25lc[?25h
#28  t=56,435  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    tivate
#29  t=56,994  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l[?2004l\r]0;mahajanm@node17: /usr/stud/mahajanm/Projects/jafar[?2004h(jafar) ]0;mahajanm@node17: ~/Projects/jafarmahajanm@node17:~/Projects/jafar$ [?25h
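Records 15–29 illustrate how interactive typing is stored: each keystroke of `source .venv/bin/activate` arrives as its own terminal_output event, wrapped in cursor-hide/show escapes ([?25l ... [?25h) and interleaved with tab-completion echoes (env/, in/, tivate). A sketch of recovering the visible text, assuming the raw Text field retains the ESC (0x1b) bytes that the rendering above does not show; the regex covers only the CSI and OSC sequence forms seen in this excerpt, not the full ANSI grammar:

```python
# Sketch: strip the terminal escape sequences seen in this log and join the
# per-keystroke events back into readable text. Assumes Text retains the raw
# ESC (0x1b) bytes; only the CSI and OSC forms visible here are handled.
import re

ESCAPES = re.compile(
    r"\x1b\[[0-9;?]*[@-~]"                 # CSI, e.g. \x1b[?25l / \x1b[?25h
    r"|\x1b\][^\x07\x1b]*(?:\x07|\x1b\\)"  # OSC, e.g. window titles and ]633; shell-integration marks
)

def visible(chunk: str) -> str:
    """Drop escape sequences, keeping only printable output."""
    return ESCAPES.sub("", chunk)

# Concatenating records 15-29 yields "source .venv/bin/activate" plus the
# completion echoes shown above. keystroke_rows is a hypothetical slice of
# consecutive terminal_output records.
typed = "".join(visible(row["Text"]) for row in keystroke_rows)
```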
#30  t=66,623  file=slurm/jobs/mihir/horeka/yolo-runs/sampling.sh  range=0+0  lang=shellscript  type=tab
    \n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/storage/user/mahajanm/Projects/world-modeling/checkpoints/tokenizer_ckpt\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --seq_len=3 \\n --num_latent_actions=1 \\n --start_frame=0 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n
#31  t=70,060  file=slurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch  range=0+0  lang=shellscript  type=tab
    #!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:1\n#SBATCH --output=/storage/slurm/mahajanm/yoloruns/%x_%j.log\n#SBATCH --error=/storage/slurm/mahajanm/yoloruns/%x_%j.log\n#SBATCH --job-name=train_dynamics_overfit_sample_causal_actionspace-1\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=.\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/storage/user/mahajanm/Projects/world-modeling/checkpoints/causal/overfit-actionspace-1/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n# tokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ntokenizer_ckpt_dir=/storage/user/mahajanm/Projects/world-modeling/checkpoints/tokenizer_ckpt\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --num_steps=2000 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=1 \\n --init_lr=1e-4 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --num_latent_actions=1 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-causal-overfit-actionspace-1-$slurm_job_id \\n --tags dynamics causal overfit \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4\n
#32  t=89,436  file=slurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch  range=1,577+0  lang=shellscript  type=selection_mouse
    null
#33  t=89,441  file=slurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch  range=1,576+0  lang=shellscript  type=selection_command
    null
#34  t=92,442  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    pre-commit install
#35  t=93,100  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lpre-commit install\r\n[?2004l\r[?25h
#36  t=94,074  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    pre-commit installed at .git/hooks/pre-commit\r\n]0;mahajanm@node17: /usr/stud/mahajanm/Projects/jafar[?2004h(jafar) ]0;mahajanm@node17: ~/Projects/jafarmahajanm@node17:~/Projects/jafar$
#37  t=180,232  file=slurm/jobs/mihir/horeka/overfit_sample/causal/dynamics_overfit_sample.sbatch  range=0+0  lang=shellscript  type=tab
    null
#38  t=181,990  file=train_dynamics.py  range=0+0  lang=python  type=tab
    from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = "wsd" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = ""\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = ""\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n use_maskgit: bool = False\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = ""\n project: str = ""\n name: str = "train_dynamics"\n tags: list[str] = field(default_factory=lambda: ["dynamics"])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = ""\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = ""\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """Compute masked dynamics loss"""\n inputs["videos"] = inputs["videos"].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={"params": inputs["rng"], "dropout": inputs["dropout_rng"]},\n )\n mask = outputs["mask"]\n outputs["token_logits"] = outputs["token_logits"].astype(jnp.float32)\n outputs["recon"] = outputs["recon"].astype(jnp.float32)\n logits = outputs["token_logits"]\n targets = outputs["video_tokens"]\n\n # if not args.use_maskgit:\n # logits = outputs["token_logits"][:, :, :-1]\n # targets = outputs["video_tokens"][:, :, 1:]\n # mask = outputs["mask"][:, :, 1:] \n\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n logits, targets\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = logits.argmax(-1) == targets\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(logits)\n gt = inputs["videos"].clip(0, 1).reshape(-1, *inputs["videos"].shape[2:])\n recon = outputs["recon"].clip(0, 1).reshape(-1, *outputs["recon"].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs["lam_indices"]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs["video_tokens"]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=logits.max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs["recon"], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """Update state and compute metrics"""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics["gradients_std/"] = jax.tree.map(\n lambda x: x.std(), grads["params"]["dynamics"]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == "__main__":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError("No JAX devices found.")\n print(f"Running on {num_devices} devices.")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f"Global batch size {args.batch_size} must be divisible by "\n f"number of devices {num_devices}."\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n use_maskgit=args.use_maskgit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n "entity": args.entity,\n "project": args.project,\n "name": args.name,\n "tags": args.tags,\n "group": "debug",\n "config": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n "id": args.wandb_id,\n "resume": "allow",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({"model_param_count": param_counts})\n\n print("Parameter counts:")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=("data",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n "model_state", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n "model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n # handler_registry.add("dataloader_state", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n # handler_registry.add("dataloader_state", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith(".array_record")\n ]\n # grain_dataloader = get_dataloader(\n # array_record_files,\n # args.seq_len,\n # # NOTE: We deliberately pass the global batch size\n # # The dataloader shards the dataset across all processes\n # args.batch_size,\n # *image_shape,\n # num_workers=8,\n # prefetch_buffer_size=1,\n # seed=args.seed,\n # )\n # initial_state = grain_dataloader._create_initial_state()\n # grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n pass\n # # Restore full dynamics model\n # abstract_train_state = jax.tree_util.tree_map(\n # ocp.utils.to_shape_dtype_struct, train_state\n # )\n # restored = checkpoint_manager.restore(\n # checkpoint_manager.latest_step(),\n # args=ocp.args.Composite(\n # model_state=ocp.args.StandardRestore(abstract_train_state),\n # dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n # ),\n # )\n # train_state = restored["model_state"]\n # grain_iterator = restored["dataloader_state"]\n # step = checkpoint_manager.latest_step() or 0\n # print(f"Restored dataloader and model state from step {step}")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n # dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n # for videos in dataloader:\n videos = np.load("overfit_dir/single_sample_corner.npy") * 255.\n videos = videos.astype(np.uint8)\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n while True:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics["lr"] = lr_schedule(step)\n print(f"Step {step}, loss: {loss}")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n "loss": loss,\n "step": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs["videos"][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, "t h w c -> h (t w) c"\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n # dataloader_state=grain.checkpoint.CheckpointSave(\n # grain_iterator\n # ),\n ),\n )\n print(f"Saved checkpoint at step {step}")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n
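tab events double as full-file snapshots: records 30, 31, and 38 embed sampling.sh, the sbatch script, and all of train_dynamics.py in their Text fields, with newlines stored as the literal two-character escape \n. A sketch of writing such snapshots back to disk under that assumption (dump_snapshots and the null handling are illustrative, not part of the dataset's tooling):

```python
# Sketch: materialize the editor snapshots carried by "tab" events.
# Assumes newlines are stored as the literal two-character escape "\n",
# as in the rendering above; names here are illustrative.
from pathlib import Path

def dump_snapshots(rows, out_root: Path) -> None:
    for row in rows:
        text = row["Text"]
        if row["Type"] == "tab" and text and text != "null":
            dest = out_root / row["File"]
            dest.parent.mkdir(parents=True, exist_ok=True)
            # Decode the escaped newlines and write the snapshot
            dest.write_text(text.replace("\\n", "\n"))
```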
#39  t=185,772  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lgi[?25h
#40  t=185,843  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#41  t=185,944  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lt[?25h
#42  t=186,048  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l [?25h
#43  t=186,140  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lp[?25h
#44  t=186,380  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lu[?25h
#45  t=186,579  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ll[?25h
#46  t=186,659  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ll[?25h
#47  t=186,728  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l[?2004l\r[?25h
#48  t=188,177  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    remote: Enumerating objects: 27, done.\r\nremote: Counting objects: 4% (1/24)\rremote: Counting objects: 8% (2/24)\rremote: Counting objects: 12% (3/24)\rremote: Counting objects: 16% (4/24)\rremote: Counting objects: 20% (5/24)\rremote: Counting objects: 25% (6/24)\rremote: Counting objects: 29% (7/24)\rremote: Counting objects: 33% (8/24)\rremote: Counting objects: 37% (9/24)\rremote: Counting objects: 41% (10/24)\rremote: Counting objects: 45% (11/24)\rremote: Counting objects: 50% (12/24)\rremote: Counting objects: 54% (13/24)\rremote: Counting objects: 58% (14/24)\rremote: Counting objects: 62% (15/24)\rremote: Counting objects: 66% (16/24)\rremote: Counting objects: 70% (17/24)\rremote: Counting objects: 75% (18/24)\rremote: Counting objects: 79% (19/24)\rremote: Counting objects: 83% (20/24)\rremote: Counting objects: 87% (21/24)\rremote: Counting objects: 91% (22/24)\rremote: Counting objects: 95% (23/24)\rremote: Counting objects: 100% (24/24)\rremote: Counting objects: 100% (24/24), done.\r\nremote: Compressing objects: 20% (1/5)\rremote: Compressing objects: 40% (2/5)\rremote: Compressing objects: 60% (3/5)\rremote: Compressing objects: 80% (4/5)\rremote: Compressing objects: 100% (5/5)\rremote: Compressing objects: 100% (5/5), done.\r\nremote: Total 11 (delta 6), reused 11 (delta 6), pack-reused 0 (from 0)\r\nUnpacking objects: 9% (1/11)\rUnpacking objects: 18% (2/11)\rUnpacking objects: 27% (3/11)\rUnpacking objects: 36% (4/11)\rUnpacking objects: 45% (5/11)\rUnpacking objects: 54% (6/11)\rUnpacking objects: 63% (7/11)\rUnpacking objects: 72% (8/11)\rUnpacking objects: 81% (9/11)\rUnpacking objects: 90% (10/11)\rUnpacking objects: 100% (11/11)\rUnpacking objects: 100% (11/11), 1.32 KiB | 39.00 KiB/s, done.\r\n
#49  t=188,346  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    From github.com:p-doom/jafar\r\n 8cbd77e..b2196a7 new-arch-sampling -> origin/new-arch-sampling\r\n * [new branch] feat/actions_in_dummy_data -> origin/feat/actions_in_dummy_data\r\nUpdating 8cbd77e..b2196a7\r\nerror: Your local changes to the following files would be overwritten by merge:\r\n\ttrain_dynamics.py\r\nPlease commit your changes or stash them before you merge.\r\nAborting\r\n]0;mahajanm@node17: /usr/stud/mahajanm/Projects/jafar[?2004h(jafar) ]0;mahajanm@node17: ~/Projects/jafarmahajanm@node17:~/Projects/jafar$
#50  t=190,787  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    g
#51  t=190,906  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#52  t=190,966  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lt[?25h
#53  t=191,055  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l [?25h
#54  t=192,040  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ld[?25h
#55  t=192,093  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#56  t=192,223  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lf[?25h
#57  t=192,405  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lf[?25h
#58  t=192,540  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r\n[?2004l\r[?1h=\rdiff --git a/genie.py b/genie.py\r\nindex 3e0e2d9..1e655da 100644\r\n--- a/genie.py\r\n+++ b/genie.py\r\n@@ -347,7 +347,6 @@ class MaskGITStep(nn.Module):\r\n def restore_genie_components(\r\n train_state: TrainState,\r\n sharding: jax.sharding.NamedSharding,\r\n- grain_iterator: grain.DataLoaderIterator,\r\n inputs: Dict[str, jax.Array],\r\n rng: jax.Array,\r\n args,\r\ndiff --git a/models/lam.py b/models/lam.py\r\nindex ae0662a..3e6c536 100644\r\n--- a/models/lam.py\r\n+++ b/models/lam.py\r\n@@ -89,7 +89,7 @@ class LatentActionModel(nn.Module):\r\n patches = patchify(videos, self.patch_size)\r\n action_pad = jnp.broadcast_to(self.action_in, (B, T, 1, self.patch_token_dim))\r\n # FIXME mihir do this the other way around\r\n- padded_patches = jnp.concatenate((action_pad, patches), axis=2)\r\n+ padded_patches = jnp.concatenate((patches, action_pad), axis=2)\r\n \r\n # --- Encode ---\r\n z = self.encoder(padded_patches) # (B, T, N, E)\r\ndiff --git a/train_dynamics.py b/train_dynamics.py\r\n:
#59  t=194,412  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \rindex df1446a..40c0d4e 100644\r\n:
#60  t=194,592  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r--- a/train_dynamics.py\r\n:
#61  t=194,762  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r+++ b/train_dynamics.py\r\n:
#62  t=194,907  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r@@ -95,6 +95,7 @@ def dynamics_loss_fn(params, state, inputs):\r\n:
#63  t=195,003  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r )\r\n:
#64  t=195,184  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r mask = outputs["mask"]\r\n:
#65  t=195,852  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r outputs["token_logits"] = outputs["token_logits"].astype(jnp.float32)\r\n:
#66  t=196,055  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r+ outputs["recon"] = outputs["recon"].astype(jnp.float32)\r\n:
#67  t=196,200  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r logits = outputs["token_logits"]\r\n:
#68  t=196,338  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r targets = outputs["video_tokens"]\r\n:
#69  t=196,480  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r \r\n:
#70  t=196,625  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r@@ -263,8 +264,8 @@ if __name__ == "__main__":\r\n:
#71  t=196,746  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r handler_registry.add(\r\n:
#72  t=196,890  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r "model_state", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\r\n:
#73  t=197,052  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r )\r\n:
#74  t=197,187  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r[?1l>
#75  t=197,587  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    g
#76  t=197,727  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#77  t=198,110  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    ]0;mahajanm@node17: /usr/stud/mahajanm/Projects/jafar[?2004h(jafar) ]0;mahajanm@node17: ~/Projects/jafarmahajanm@node17:~/Projects/jafar$ gi
#78  t=198,989  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lg[?25h
#79  t=199,084  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#80  t=199,212  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lt[?25h
#81  t=199,334  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l [?25h
#82  t=199,413  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ls[?25h
#83  t=199,506  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lt[?25h
#84  t=199,590  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25la[?25h
#85  t=199,678  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ls[?25h
#86  t=199,778  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lh[?25h
#87  t=199,990  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r\n[?2004l\r
#88  t=202,208  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lgi[?25h
#89  t=202,257  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#90  t=202,461  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lt[?25h
#91  t=202,526  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l [?25h
#92  t=202,603  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lp[?25h
#93  t=202,781  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lu[?25h
#94  t=202,969  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ll[?25h
#95  t=203,090  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25ll[?25h
#96  t=203,226  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    \r\n
#97  t=203,956  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lgi[?25h
#98  t=204,043  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25li[?25h
#99  t=204,146  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25lt[?25h
#100  t=204,292  file=TERMINAL  range=0+0  lang=null  type=terminal_output
    [?25l [?25h
#101  t=204,383  file=TERMINAL  range=0+0
    [?25ls[?25h